421098b3ys5GAr4z6_H1jD33oem82g xen/arch/ia64/irq.c
421098b3Heh72KuoVlND3CH6c0B0aA xen/arch/ia64/lib/Makefile
421098b3O0MYMUsmYVFy84VV_1gFwQ xen/arch/ia64/mm_init.c
+425ae516skiHBZU-Kfwxv2YWXfNRWQ xen/arch/ia64/patch/linux-2.6.11/bootmem.h
+425ae516maKAsHBJVSzs19cdRgt3Nw xen/arch/ia64/patch/linux-2.6.11/cpumask.h
+425ae516rHybgKj6KsvBOE0c_Hruxg xen/arch/ia64/patch/linux-2.6.11/current.h
+425ae516cGqvMzGtihTEsQXAXsuOhQ xen/arch/ia64/patch/linux-2.6.11/efi.c
+425ae516Y1A4q4_Kfre3qnDj7lbHJg xen/arch/ia64/patch/linux-2.6.11/entry.S
+425ae516RoFheL-Ua-EOtFqmLxoc9g xen/arch/ia64/patch/linux-2.6.11/hardirq.h
+425ae516PDO1ESDHXHVeDNvlqUfmdQ xen/arch/ia64/patch/linux-2.6.11/head.S
+425ae5163aiWdc1IZNsON6ruE2-n9g xen/arch/ia64/patch/linux-2.6.11/hpsim_irq.c
+425ae516JR7HWvt1zxJ-wLvEWmJGgg xen/arch/ia64/patch/linux-2.6.11/hpsim_ssc.h
+425ae516AHRNmaVuZjJY-9YjmKRDqg xen/arch/ia64/patch/linux-2.6.11/interrupt.h
+425ae516U2wFUzrUJQUpy3z38jZHsQ xen/arch/ia64/patch/linux-2.6.11/io.h
+425ae516nXL2iTzpziIaSLi3N257qQ xen/arch/ia64/patch/linux-2.6.11/irq.h
+425ae516GGRmXijPBLC5ii6yWOn0rg xen/arch/ia64/patch/linux-2.6.11/irq_ia64.c
+425ae516qQA5dHuIybqfN3nEzM_Zvg xen/arch/ia64/patch/linux-2.6.11/ivt.S
+425ae516_UhrTa3Y8wDG7fTgX6pNYA xen/arch/ia64/patch/linux-2.6.11/kernel-time.c
+425ae516atiECmpn_6nZDw4kkmbJ6g xen/arch/ia64/patch/linux-2.6.11/kregs.h
+425ae516lwlYwHG1Jv93kC3tfU5caw xen/arch/ia64/patch/linux-2.6.11/lds.S
+425ae516UGTH2xC56DEIlHSrPH4oxg xen/arch/ia64/patch/linux-2.6.11/linuxextable.c
+425ae516txAP-owjzpTJ7ThfzWR8nw xen/arch/ia64/patch/linux-2.6.11/linuxhardirq.h
+425ae516kNsO5yYnBHvAISdvCkQ2-g xen/arch/ia64/patch/linux-2.6.11/linuxtime.h
+425ae516Je2zI-Iw30_uGhvUYdlCZQ xen/arch/ia64/patch/linux-2.6.11/mca_asm.h
+425ae5160-9wHxh0tOnIjavEjt6W0A xen/arch/ia64/patch/linux-2.6.11/minstate.h
+425ae516N7SaORdbodDr90tmtCzYXw xen/arch/ia64/patch/linux-2.6.11/mm_contig.c
+425ae516YcBgoZ3xCTEmhCrgX8CjCA xen/arch/ia64/patch/linux-2.6.11/mmzone.h
+425ae516WDLrfEA4zr40d00z0VIWPg xen/arch/ia64/patch/linux-2.6.11/page.h
+425ae516pVQ75NhdItT593SiWI0lbQ xen/arch/ia64/patch/linux-2.6.11/pal.S
+425ae516QfmjiF_a-mabAXqV8Imzkg xen/arch/ia64/patch/linux-2.6.11/pgalloc.h
+425ae516EWaNOBEnc1xnphTbRmNZsw xen/arch/ia64/patch/linux-2.6.11/processor.h
+425ae5165sks4NwRldZOV_p63fspYw xen/arch/ia64/patch/linux-2.6.11/sal.h
+425ae516LecDyXlwh3NLBtHZKXmMcA xen/arch/ia64/patch/linux-2.6.11/series
+425ae516RFiPn2CGkpJ21LM-1lJcQg xen/arch/ia64/patch/linux-2.6.11/setup.c
+425ae516-xgihgqPEv-Aq8-9cNoCXg xen/arch/ia64/patch/linux-2.6.11/slab.h
+425ae516FX_10YaKGMU8Ysf7kkdm_A xen/arch/ia64/patch/linux-2.6.11/swiotlb.c
+425ae516p4ICTkjqNYEfYFxqULj4dw xen/arch/ia64/patch/linux-2.6.11/system.h
+425ae516juUB257qrwUdsL9AsswrqQ xen/arch/ia64/patch/linux-2.6.11/time.c
+425ae5167zQn7zYcgKtDUDX2v-e8mw xen/arch/ia64/patch/linux-2.6.11/tlb.c
+425ae5162bIl2Dgd19x-FceB4L9oGw xen/arch/ia64/patch/linux-2.6.11/types.h
+425ae516cFUNY2jHD46bujcF5NJheA xen/arch/ia64/patch/linux-2.6.11/unaligned.c
+425ae516viTtlyQjWHf6kBmq8KcwiQ xen/arch/ia64/patch/linux-2.6.11/wait.h
421098b39QFMC-1t1r38CA7NxAYBPA xen/arch/ia64/patch/linux-2.6.7/bootmem.h
421098b3SIA1vZX9fFUjo1T3o_jMCQ xen/arch/ia64/patch/linux-2.6.7/current.h
421098b3ZBl80iPuSeDU_Id5AgZl0w xen/arch/ia64/patch/linux-2.6.7/efi.c
struct mm_struct *mm = d->arch.mm;
struct page *p = (struct page *)0;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
extern unsigned long vhpt_paddr, vhpt_pend;
}
pgd = pgd_offset(mm,mpaddr);
if (pgd_none(*pgd))
- pgd_populate(mm, pgd, pmd_alloc_one(mm,mpaddr));
+ pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
- pmd = pmd_offset(pgd, mpaddr);
+ pud = pud_offset(pgd, mpaddr);
+ if (pud_none(*pud))
+ pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
+
+ pmd = pmd_offset(pud, mpaddr);
if (pmd_none(*pmd))
pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
// pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
{
struct mm_struct *mm = d->arch.mm;
pgd_t *pgd = pgd_offset(mm, mpaddr);
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
#endif
tryagain:
if (pgd_present(*pgd)) {
- pmd = pmd_offset(pgd,mpaddr);
- if (pmd_present(*pmd)) {
- pte = pte_offset_map(pmd,mpaddr);
- if (pte_present(*pte)) {
+ pud = pud_offset(pgd,mpaddr);
+ if (pud_present(*pud)) {
+ pmd = pmd_offset(pud,mpaddr);
+ if (pmd_present(*pmd)) {
+ pte = pte_offset_map(pmd,mpaddr);
+ if (pte_present(*pte)) {
//printk("lookup_domain_page: found mapping for %lx, pte=%lx\n",mpaddr,pte_val(*pte));
- return *(unsigned long *)pte;
+ return *(unsigned long *)pte;
+ }
}
- }
+ }
}
/* if lookup fails and mpaddr is "legal", "create" the page */
if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
-unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
+fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
{
- /*
- * We ack quickly, we don't want the irq controller
- * thinking we're snobs just because some other CPU has
- * disabled global interrupts (we have already done the
- * INT_ACK cycles, it's too late to try to pretend to the
- * controller that we aren't taking the interrupt).
- *
- * 0 return value means that this irq is already being
- * handled by some other CPU. (or is disabled)
- */
- irq_desc_t *desc = irq_descp(irq);
+ irq_desc_t *desc = irq_desc + irq;
struct irqaction * action;
- irqreturn_t action_ret;
unsigned int status;
- int cpu;
-
- cpu = smp_processor_id(); /* for CONFIG_PREEMPT, this must come after irq_enter()! */
#ifndef XEN
- kstat_cpu(cpu).irqs[irq]++;
+ kstat_this_cpu.irqs[irq]++;
#endif
-
if (desc->status & IRQ_PER_CPU) {
- /* no locking required for CPU-local interrupts: */
+ irqreturn_t action_ret;
+
+ /*
+ * No locking required for CPU-local interrupts:
+ */
desc->handler->ack(irq);
action_ret = handle_IRQ_event(irq, regs, desc->action);
+#ifndef XEN
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret);
+#endif
desc->handler->end(irq);
- } else {
- spin_lock(&desc->lock);
- desc->handler->ack(irq);
- /*
- * REPLAY is when Linux resends an IRQ that was dropped earlier
- * WAITING is used by probe to mark irqs that are being tested
- */
+ return 1;
+ }
+
+ spin_lock(&desc->lock);
+ desc->handler->ack(irq);
+ /*
+ * REPLAY is when Linux resends an IRQ that was dropped earlier
+ * WAITING is used by probe to mark irqs that are being tested
+ */
#ifdef XEN
- status = desc->status & ~IRQ_REPLAY;
+ status = desc->status & ~IRQ_REPLAY;
#else
- status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
#endif
- status |= IRQ_PENDING; /* we _want_ to handle it */
+ status |= IRQ_PENDING; /* we _want_ to handle it */
- /*
- * If the IRQ is disabled for whatever reason, we cannot
- * use the action we have.
- */
- action = NULL;
- if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
- action = desc->action;
- status &= ~IRQ_PENDING; /* we commit to handling */
- status |= IRQ_INPROGRESS; /* we are handling it */
- }
- desc->status = status;
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
- /*
- * If there is no IRQ handler or it was disabled, exit early.
- * Since we set PENDING, if another processor is handling
- * a different instance of this same irq, the other processor
- * will take care of it.
- */
- if (unlikely(!action))
- goto out;
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ * Since we set PENDING, if another processor is handling
+ * a different instance of this same irq, the other processor
+ * will take care of it.
+ */
+ if (unlikely(!action))
+ goto out;
- /*
- * Edge triggered interrupts need to remember
- * pending events.
- * This applies to any hw interrupts that allow a second
- * instance of the same irq to arrive while we are in do_IRQ
- * or in the handler. But the code here only handles the _second_
- * instance of the irq, not the third or fourth. So it is mostly
- * useful for irq hardware that does not mask cleanly in an
- * SMP environment.
- */
- for (;;) {
- spin_unlock(&desc->lock);
- action_ret = handle_IRQ_event(irq, regs, action);
- spin_lock(&desc->lock);
+ /*
+ * Edge triggered interrupts need to remember
+ * pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in do_IRQ
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ irqreturn_t action_ret;
+
+ spin_unlock(&desc->lock);
+
+ action_ret = handle_IRQ_event(irq, regs, action);
+
+ spin_lock(&desc->lock);
#ifndef XEN
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ if (!noirqdebug)
+ note_interrupt(irq, desc, action_ret);
#endif
- if (!(desc->status & IRQ_PENDING))
- break;
- desc->status &= ~IRQ_PENDING;
- }
- desc->status &= ~IRQ_INPROGRESS;
- out:
- /*
- * The ->end() handler has to deal with interrupts which got
- * disabled while the handler was running.
- */
- desc->handler->end(irq);
- spin_unlock(&desc->lock);
+ if (likely(!(desc->status & IRQ_PENDING)))
+ break;
+ desc->status &= ~IRQ_PENDING;
}
+ desc->status &= ~IRQ_INPROGRESS;
+
+out:
+ /*
+ * The ->end() handler has to deal with interrupts which got
+ * disabled while the handler was running.
+ */
+ desc->handler->end(irq);
+ spin_unlock(&desc->lock);
+
return 1;
}
/////////////////////////////////////////////
//following from linux/mm/memory.c
+#ifndef __ARCH_HAS_4LEVEL_HACK
+/*
+ * Allocate page upper directory.
+ *
+ * We've already handled the fast-path in-line, and we own the
+ * page table lock.
+ *
+ * On a two-level or three-level page table, this ends up actually being
+ * entirely optimized away.
+ */
+pud_t fastcall *__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ pud_t *new;
+
+ spin_unlock(&mm->page_table_lock);
+ new = pud_alloc_one(mm, address);
+ spin_lock(&mm->page_table_lock);
+ if (!new)
+ return NULL;
+
+ /*
+ * Because we dropped the lock, we should re-check the
+ * entry, as somebody else could have populated it..
+ */
+ if (pgd_present(*pgd)) {
+ pud_free(new);
+ goto out;
+ }
+ pgd_populate(mm, pgd, new);
+ out:
+ return pud_offset(pgd, address);
+}
+
/*
* Allocate page middle directory.
*
* On a two-level page table, this ends up actually being entirely
* optimized away.
*/
-pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+pmd_t fastcall *__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
pmd_t *new;
* Because we dropped the lock, we should re-check the
* entry, as somebody else could have populated it..
*/
- if (pgd_present(*pgd)) {
+ if (pud_present(*pud)) {
pmd_free(new);
goto out;
}
- pgd_populate(mm, pgd, new);
-out:
- return pmd_offset(pgd, address);
+ pud_populate(mm, pud, new);
+ out:
+ return pmd_offset(pud, address);
}
+#endif
pte_t fastcall * pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
--- /dev/null
+ bootmem.h | 2 ++
+ 1 files changed, 2 insertions(+)
+
+Index: linux-2.6.11/include/linux/bootmem.h
+===================================================================
+--- linux-2.6.11.orig/include/linux/bootmem.h 2005-03-02 01:38:25.000000000 -0600
++++ linux-2.6.11/include/linux/bootmem.h 2005-03-19 12:39:36.915887729 -0600
+@@ -41,7 +41,9 @@ extern unsigned long __init init_bootmem
+ extern void __init free_bootmem (unsigned long addr, unsigned long size);
+ extern void * __init __alloc_bootmem (unsigned long size, unsigned long align, unsigned long goal);
+ #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
++#ifndef XEN
+ extern void __init reserve_bootmem (unsigned long addr, unsigned long size);
++#endif
+ #define alloc_bootmem(x) \
+ __alloc_bootmem((x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+ #define alloc_bootmem_low(x) \
--- /dev/null
+ cpumask.h | 2 +-
+ 1 files changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-2.6.11-xendiffs/include/linux/cpumask.h
+===================================================================
+--- linux-2.6.11-xendiffs.orig/include/linux/cpumask.h 2005-03-02 01:38:00.000000000 -0600
++++ linux-2.6.11-xendiffs/include/linux/cpumask.h 2005-03-24 15:06:18.408145243 -0600
+@@ -341,11 +341,11 @@ static inline int __cpumask_parse(const
+ * main(){ set1(3); set2(5); }
+ */
+
++#if NR_CPUS > 1
+ extern cpumask_t cpu_possible_map;
+ extern cpumask_t cpu_online_map;
+ extern cpumask_t cpu_present_map;
+
+-#if NR_CPUS > 1
+ #define num_online_cpus() cpus_weight(cpu_online_map)
+ #define num_possible_cpus() cpus_weight(cpu_possible_map)
+ #define num_present_cpus() cpus_weight(cpu_present_map)
--- /dev/null
+ current.h | 8 ++++++++
+ 1 files changed, 8 insertions(+)
+
+Index: linux-2.6.11/include/asm-ia64/current.h
+===================================================================
+--- linux-2.6.11.orig/include/asm-ia64/current.h 2005-03-02 01:38:19.000000000 -0600
++++ linux-2.6.11/include/asm-ia64/current.h 2005-03-19 12:39:41.410955288 -0600
+@@ -12,6 +12,14 @@
+ * In kernel mode, thread pointer (r13) is used to point to the current task
+ * structure.
+ */
++#ifdef XEN
++struct domain;
++#define get_current() ((struct exec_domain *) ia64_getreg(_IA64_REG_TP))
++#define current get_current()
++//#define set_current(d) ia64_setreg(_IA64_REG_TP,(void *)d);
++#define set_current(d) (ia64_r13 = (void *)d)
++#else
+ #define current ((struct task_struct *) ia64_getreg(_IA64_REG_TP))
++#endif
+
+ #endif /* _ASM_IA64_CURRENT_H */
--- /dev/null
+ efi.c | 32 ++++++++++++++++++++++++++++++++
+ 1 files changed, 32 insertions(+)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/kernel/efi.c
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/efi.c 2005-04-07 12:22:08.230781400 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/kernel/efi.c 2005-04-07 12:25:11.875195997 -0500
+@@ -25,6 +25,9 @@
+ #include <linux/types.h>
+ #include <linux/time.h>
+ #include <linux/efi.h>
++#ifdef XEN
++#include <xen/sched.h>
++#endif
+
+ #include <asm/io.h>
+ #include <asm/kregs.h>
+@@ -218,6 +221,7 @@ efi_gettimeofday (struct timespec *ts)
+ if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
+ return;
+
++ dummy();
+ ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
+ ts->tv_nsec = tm.nanosecond;
+ }
+@@ -320,6 +324,10 @@ efi_memmap_walk (efi_freemem_callback_t
+ if (!(md->attribute & EFI_MEMORY_WB))
+ continue;
+
++#ifdef XEN
++// this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP
++ if (md->phys_addr >= 0x100000000) continue;
++#endif
+ /*
+ * granule_addr is the base of md's first granule.
+ * [granule_addr - first_non_wb_addr) is guaranteed to
+@@ -719,6 +727,30 @@ efi_get_iobase (void)
+ return 0;
+ }
+
++#ifdef XEN
++// variation of efi_get_iobase which returns entire memory descriptor
++efi_memory_desc_t *
++efi_get_io_md (void)
++{
++ void *efi_map_start, *efi_map_end, *p;
++ efi_memory_desc_t *md;
++ u64 efi_desc_size;
++
++ efi_map_start = __va(ia64_boot_param->efi_memmap);
++ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
++ efi_desc_size = ia64_boot_param->efi_memdesc_size;
++
++ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
++ md = p;
++ if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
++ if (md->attribute & EFI_MEMORY_UC)
++ return md;
++ }
++ }
++ return 0;
++}
++#endif
++
+ u32
+ efi_mem_type (unsigned long phys_addr)
+ {
--- /dev/null
+ entry.S | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 files changed, 85 insertions(+), 1 deletion(-)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/kernel/entry.S
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/entry.S 2005-04-08 13:32:07.636308237 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/kernel/entry.S 2005-04-08 13:37:04.612542509 -0500
+@@ -35,7 +35,9 @@
+
+ #include <asm/asmmacro.h>
+ #include <asm/cache.h>
++#ifndef XEN
+ #include <asm/errno.h>
++#endif
+ #include <asm/kregs.h>
+ #include <asm/offsets.h>
+ #include <asm/pgtable.h>
+@@ -46,6 +48,25 @@
+
+ #include "minstate.h"
+
++#ifdef XEN
++#define sys_execve 0
++#define do_fork 0
++#define syscall_trace_enter 0
++#define syscall_trace_leave 0
++#define schedule 0
++#define do_notify_resume_user 0
++#define ia64_rt_sigsuspend 0
++#define ia64_rt_sigreturn 0
++#define ia64_handle_unaligned 0
++#define errno 0
++#define sys_ni_syscall 0
++#define unw_init_frame_info 0
++#define sys_call_table 0
++#define do_sigdelayed 0
++#endif
++
++	/* XEN: the #define stubs above satisfy unresolved references. */
++
+ /*
+ * execve() is special because in case of success, we need to
+ * setup a null register window frame.
+@@ -187,11 +208,14 @@ GLOBAL_ENTRY(ia64_switch_to)
+ DO_SAVE_SWITCH_STACK
+ .body
+
++#ifdef XEN
++//#undef IA64_TASK_THREAD_KSP_OFFSET
++//#define IA64_TASK_THREAD_KSP_OFFSET 0x38
+ adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
+ movl r25=init_task
+ mov r27=IA64_KR(CURRENT_STACK)
+ adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
+- dep r20=0,in0,61,3 // physical address of "next"
++ dep r20=0,in0,60,4 // physical address of "next"
+ ;;
+ st8 [r22]=sp // save kernel stack pointer of old task
+ shr.u r26=r20,IA64_GRANULE_SHIFT
+@@ -203,6 +227,22 @@ GLOBAL_ENTRY(ia64_switch_to)
+ (p6) cmp.eq p7,p6=r26,r27
+ (p6) br.cond.dpnt .map
+ ;;
++#else
++ adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
++ mov r27=IA64_KR(CURRENT_STACK)
++ dep r20=0,in0,61,3 // physical address of "current"
++ ;;
++ st8 [r22]=sp // save kernel stack pointer of old task
++ shr.u r26=r20,IA64_GRANULE_SHIFT
++ adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
++ ;;
++ /*
++ * If we've already mapped this task's page, we can skip doing it again.
++ */
++ cmp.eq p7,p6=r26,r27
++(p6) br.cond.dpnt .map
++ ;;
++#endif
+ .done:
+ (p6) ssm psr.ic // if we had to map, reenable the psr.ic bit FIRST!!!
+ ;;
+@@ -220,6 +260,16 @@ GLOBAL_ENTRY(ia64_switch_to)
+ br.ret.sptk.many rp // boogie on out in new context
+
+ .map:
++#ifdef XEN
++ // avoid overlapping with kernel TR
++ movl r25=KERNEL_START
++ dep r23=0,in0,0,KERNEL_TR_PAGE_SHIFT
++ ;;
++ cmp.eq p7,p0=r25,r23
++ ;;
++(p7) mov IA64_KR(CURRENT_STACK)=r26 // remember last page we mapped...
++(p7) br.cond.sptk .done
++#endif
+ rsm psr.ic // interrupts (psr.i) are already disabled here
+ movl r25=PAGE_KERNEL
+ ;;
+@@ -376,7 +426,11 @@ END(save_switch_stack)
+ * - b7 holds address to return to
+ * - must not touch r8-r11
+ */
++#ifdef XEN
++GLOBAL_ENTRY(load_switch_stack)
++#else
+ ENTRY(load_switch_stack)
++#endif
+ .prologue
+ .altrp b7
+
+@@ -604,6 +658,11 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
+ */
+ br.call.sptk.many rp=ia64_invoke_schedule_tail
+ }
++#ifdef XEN
++ // new domains are cloned but not exec'ed so switch to user mode here
++ cmp.ne pKStk,pUStk=r0,r0
++ br.cond.spnt ia64_leave_kernel
++#else
+ .ret8:
+ adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
+ ;;
+@@ -614,6 +673,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
+ ;;
+ cmp.ne p6,p0=r2,r0
+ (p6) br.cond.spnt .strace_check_retval
++#endif
+ ;; // added stop bits to prevent r8 dependency
+ END(ia64_ret_from_clone)
+ // fall through
+@@ -700,9 +760,14 @@ ENTRY(ia64_leave_syscall)
+ .work_processed_syscall:
+ adds r2=PT(LOADRS)+16,r12
+ adds r3=PT(AR_BSPSTORE)+16,r12
++#ifdef XEN
++ mov r31=r0
++ ;;
++#else
+ adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
+ ;;
+ (p6) ld4 r31=[r18] // load current_thread_info()->flags
++#endif
+ ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
+ mov b7=r0 // clear b7
+ ;;
+@@ -757,7 +822,11 @@ ENTRY(ia64_leave_syscall)
+ ;;
+ ld8.fill r12=[r2] // restore r12 (sp)
+ ld8.fill r15=[r3] // restore r15
++#ifdef XEN
++ movl r3=THIS_CPU(ia64_phys_stacked_size_p8)
++#else
+ addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
++#endif
+ ;;
+ (pUStk) ld4 r3=[r3] // r3 = cpu_data->phys_stacked_size_p8
+ (pUStk) st1 [r14]=r17
+@@ -814,9 +883,18 @@ GLOBAL_ENTRY(ia64_leave_kernel)
+ (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
+ #endif
+ .work_processed_kernel:
++#ifdef XEN
++ alloc loc0=ar.pfs,0,1,1,0
++ adds out0=16,r12
++ ;;
++(p6) br.call.sptk.many b0=deliver_pending_interrupt
++ mov ar.pfs=loc0
++ mov r31=r0
++#else
+ adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
+ ;;
+ (p6) ld4 r31=[r17] // load current_thread_info()->flags
++#endif
+ adds r21=PT(PR)+16,r12
+ ;;
+
+@@ -934,7 +1012,11 @@ GLOBAL_ENTRY(ia64_leave_kernel)
+ shr.u r18=r19,16 // get byte size of existing "dirty" partition
+ ;;
+ mov r16=ar.bsp // get existing backing store pointer
++#ifdef XEN
++ movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
++#else
+ addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
++#endif
+ ;;
+ ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
+ (pKStk) br.cond.dpnt skip_rbs_switch
+@@ -1323,6 +1405,7 @@ GLOBAL_ENTRY(unw_init_running)
+ br.ret.sptk.many rp
+ END(unw_init_running)
+
++#ifndef XEN
+ .rodata
+ .align 8
+ .globl sys_call_table
+@@ -1585,3 +1668,4 @@ sys_call_table:
+ data8 sys_ni_syscall
+
+ .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
++#endif
--- /dev/null
+ hardirq.h | 1 +
+ 1 files changed, 1 insertion(+)
+
+Index: linux-2.6.11-xendiffs/include/asm-ia64/hardirq.h
+===================================================================
+--- linux-2.6.11-xendiffs.orig/include/asm-ia64/hardirq.h 2005-03-24 15:59:37.210502749 -0600
++++ linux-2.6.11-xendiffs/include/asm-ia64/hardirq.h 2005-03-24 16:00:19.439540961 -0600
+@@ -20,6 +20,7 @@
+ #define __ARCH_IRQ_STAT 1
+
+ #define local_softirq_pending() (local_cpu_data->softirq_pending)
++#define softirq_pending(cpu) (cpu_data(cpu)->softirq_pending)
+
+ #define HARDIRQ_BITS 14
+
--- /dev/null
+ head.S | 34 ++++++++++++++++++++++++++++++++++
+ 1 files changed, 34 insertions(+)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/kernel/head.S
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/head.S 2005-04-07 10:56:19.225128582 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/kernel/head.S 2005-04-07 11:00:21.718513399 -0500
+@@ -1,3 +1,8 @@
++#ifdef XEN
++#define console_print printf
++#define kernel_thread_helper 0
++#define sys_exit 0
++#endif
+ /*
+ * Here is where the ball gets rolling as far as the kernel is concerned.
+ * When control is transferred to _start, the bootload has already
+@@ -187,7 +192,11 @@ start_ap:
+ dep r18=0,r3,0,12
+ ;;
+ or r18=r17,r18
++#ifdef XEN
++ dep r2=-1,r3,60,4 // IMVA of task
++#else
+ dep r2=-1,r3,61,3 // IMVA of task
++#endif
+ ;;
+ mov r17=rr[r2]
+ shr.u r16=r3,IA64_GRANULE_SHIFT
+@@ -227,7 +236,11 @@ start_ap:
+ ;;
+ mov ar.rsc=0x3 // place RSE in eager mode
+
++#ifdef XEN
++(isBP) dep r28=-1,r28,60,4 // make address virtual
++#else
+ (isBP) dep r28=-1,r28,61,3 // make address virtual
++#endif
+ (isBP) movl r2=ia64_boot_param
+ ;;
+ (isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
+@@ -245,7 +258,21 @@ start_ap:
+ br.call.sptk.many rp=sys_fw_init
+ .ret1:
+ #endif
++#ifdef XEN
++ alloc r2=ar.pfs,8,0,2,0
++ ;;
++#define fake_mbi_magic 0
++#define MULTIBOOT_INFO_SIZE 1024
++ .rodata
++fake_mbi:
++ .skip MULTIBOOT_INFO_SIZE
++ .previous
++ movl out0=fake_mbi
++ ;;
++ br.call.sptk.many rp=cmain
++#else
+ br.call.sptk.many rp=start_kernel
++#endif
+ .ret2: addl r3=@ltoff(halt_msg),gp
+ ;;
+ alloc r2=ar.pfs,8,0,2,0
+@@ -254,7 +281,9 @@ start_ap:
+ br.call.sptk.many b0=console_print
+
+ self: hint @pause
++ ;;
+ br.sptk.many self // endless loop
++ ;;
+ END(_start)
+
+ GLOBAL_ENTRY(ia64_save_debug_regs)
+@@ -850,7 +879,12 @@ END(ia64_delay_loop)
+ * intermediate precision so that we can produce a full 64-bit result.
+ */
+ GLOBAL_ENTRY(sched_clock)
++#ifdef XEN
++ break 0;; // FIX IA64_CPUINFO_NSEC_PER_CYC_OFFSET
++ //movl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET
++#else
+ addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
++#endif
+ mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
+ ;;
+ ldf8 f8=[r8]
--- /dev/null
+ hpsim_irq.c | 15 +++++++++++++++
+ 1 files changed, 15 insertions(+)
+
+Index: linux-2.6.11/arch/ia64/hp/sim/hpsim_irq.c
+===================================================================
+--- linux-2.6.11.orig/arch/ia64/hp/sim/hpsim_irq.c 2005-03-02 01:38:33.000000000 -0600
++++ linux-2.6.11/arch/ia64/hp/sim/hpsim_irq.c 2005-03-19 13:33:57.312014806 -0600
+@@ -9,7 +9,17 @@
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/irq.h>
++#ifdef XEN
++#include <asm/hw_irq.h>
++#endif
+
++#if 1
++void __init
++hpsim_irq_init (void)
++{
++ printf("*** hpsim_irq_init called: NOT NEEDED?!?!?\n");
++}
++#else
+ static unsigned int
+ hpsim_irq_startup (unsigned int irq)
+ {
+@@ -19,6 +29,10 @@ hpsim_irq_startup (unsigned int irq)
+ static void
+ hpsim_irq_noop (unsigned int irq)
+ {
++#if 1
++printf("hpsim_irq_noop: irq=%d\n",irq);
++while(irq);
++#endif
+ }
+
+ static void
+@@ -49,3 +63,4 @@ hpsim_irq_init (void)
+ idesc->handler = &irq_type_hp_sim;
+ }
+ }
++#endif
--- /dev/null
+ hpsim_ssc.h | 19 +++++++++++++++++++
+ 1 files changed, 19 insertions(+)
+
+Index: linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h
+===================================================================
+--- linux-2.6.11.orig/arch/ia64/hp/sim/hpsim_ssc.h 2005-03-02 01:38:17.000000000 -0600
++++ linux-2.6.11/arch/ia64/hp/sim/hpsim_ssc.h 2005-03-19 13:34:01.705520375 -0600
+@@ -33,4 +33,23 @@
+ */
+ extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
+
++#ifdef XEN
++/* Note: These are declared in linux/arch/ia64/hp/sim/simscsi.c but belong
++ * in linux/include/asm-ia64/hpsim_ssc.h, hence their addition here */
++#define SSC_OPEN 50
++#define SSC_CLOSE 51
++#define SSC_READ 52
++#define SSC_WRITE 53
++#define SSC_GET_COMPLETION 54
++#define SSC_WAIT_COMPLETION 55
++
++#define SSC_WRITE_ACCESS 2
++#define SSC_READ_ACCESS 1
++
++struct ssc_disk_req {
++ unsigned long addr;
++ unsigned long len;
++};
++#endif
++
+ #endif /* _IA64_PLATFORM_HPSIM_SSC_H */
--- /dev/null
+ interrupt.h | 2 ++
+ 1 files changed, 2 insertions(+)
+
+Index: linux-2.6.11/include/linux/interrupt.h
+===================================================================
+--- linux-2.6.11.orig/include/linux/interrupt.h 2005-03-02 01:38:09.000000000 -0600
++++ linux-2.6.11/include/linux/interrupt.h 2005-03-19 13:41:00.739901125 -0600
+@@ -33,6 +33,7 @@ typedef int irqreturn_t;
+ #define IRQ_HANDLED (1)
+ #define IRQ_RETVAL(x) ((x) != 0)
+
++#ifndef XEN
+ struct irqaction {
+ irqreturn_t (*handler)(int, void *, struct pt_regs *);
+ unsigned long flags;
+@@ -49,6 +50,7 @@ extern int request_irq(unsigned int,
+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long, const char *, void *);
+ extern void free_irq(unsigned int, void *);
++#endif
+
+
+ #ifdef CONFIG_GENERIC_HARDIRQS
--- /dev/null
+ io.h | 4 ++++
+ 1 files changed, 4 insertions(+)
+
+Index: linux-2.6.11/include/asm-ia64/io.h
+===================================================================
+--- linux-2.6.11.orig/include/asm-ia64/io.h 2005-03-02 01:38:34.000000000 -0600
++++ linux-2.6.11/include/asm-ia64/io.h 2005-03-19 13:42:06.541900818 -0600
+@@ -23,7 +23,11 @@
+ #define __SLOW_DOWN_IO do { } while (0)
+ #define SLOW_DOWN_IO do { } while (0)
+
++#ifdef XEN
++#define __IA64_UNCACHED_OFFSET 0xdffc000000000000UL /* region 6 */
++#else
+ #define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */
++#endif
+
+ /*
+ * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
--- /dev/null
+ irq.h | 9 +++++++++
+ 1 files changed, 9 insertions(+)
+
+Index: linux-2.6.11/include/asm-ia64/irq.h
+===================================================================
+--- linux-2.6.11.orig/include/asm-ia64/irq.h 2005-03-02 01:38:33.000000000 -0600
++++ linux-2.6.11/include/asm-ia64/irq.h 2005-03-19 13:42:27.957677364 -0600
+@@ -30,6 +30,15 @@ extern void disable_irq_nosync (unsigned
+ extern void enable_irq (unsigned int);
+ extern void set_irq_affinity_info (unsigned int irq, int dest, int redir);
+
++#ifdef XEN
++// dup'ed from signal.h to avoid changes to includes
++#define SA_NOPROFILE 0x02000000
++#define SA_SHIRQ 0x04000000
++#define SA_RESTART 0x10000000
++#define SA_INTERRUPT 0x20000000
++#define SA_SAMPLE_RANDOM SA_RESTART
++#endif
++
+ #ifdef CONFIG_SMP
+ extern void move_irq(int irq);
+ #else
--- /dev/null
+ irq_ia64.c | 67 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 67 insertions(+)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/kernel/irq_ia64.c
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/irq_ia64.c 2005-04-08 13:30:16.777174938 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/kernel/irq_ia64.c 2005-04-08 14:15:47.398616472 -0500
+@@ -17,18 +17,26 @@
+ #include <linux/config.h>
+ #include <linux/module.h>
+
++#ifndef XEN
+ #include <linux/jiffies.h>
++#endif
+ #include <linux/errno.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/ioport.h>
++#ifndef XEN
+ #include <linux/kernel_stat.h>
++#endif
+ #include <linux/slab.h>
++#ifndef XEN
+ #include <linux/ptrace.h>
+ #include <linux/random.h> /* for rand_initialize_irq() */
+ #include <linux/signal.h>
++#endif
+ #include <linux/smp.h>
++#ifndef XEN
+ #include <linux/smp_lock.h>
++#endif
+ #include <linux/threads.h>
+ #include <linux/bitops.h>
+
+@@ -104,6 +112,24 @@ void
+ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
+ {
+ unsigned long saved_tpr;
++#if 0
++//FIXME: For debug only, can be removed
++ static char firstirq = 1;
++ static char firsttime[256];
++ static char firstpend[256];
++ if (firstirq) {
++ int i;
++ for (i=0;i<256;i++) firsttime[i] = 1;
++ for (i=0;i<256;i++) firstpend[i] = 1;
++ firstirq = 0;
++ }
++ if (firsttime[vector]) {
++ printf("**** (entry) First received int on vector=%d,itc=%lx\n",
++ (unsigned long) vector, ia64_get_itc());
++ firsttime[vector] = 0;
++ }
++#endif
++
+
+ #if IRQ_DEBUG
+ {
+@@ -148,6 +174,27 @@ ia64_handle_irq (ia64_vector vector, str
+ ia64_setreg(_IA64_REG_CR_TPR, vector);
+ ia64_srlz_d();
+
++#ifdef XEN
++ if (vector != 0xef) {
++ extern void vcpu_pend_interrupt(void *, int);
++#if 0
++ if (firsttime[vector]) {
++ printf("**** (iterate) First received int on vector=%d,itc=%lx\n",
++ (unsigned long) vector, ia64_get_itc());
++ firsttime[vector] = 0;
++ }
++ if (firstpend[vector]) {
++ printf("**** First pended int on vector=%d,itc=%lx\n",
++ (unsigned long) vector,ia64_get_itc());
++ firstpend[vector] = 0;
++ }
++#endif
++ //FIXME: TEMPORARY HACK!!!!
++ vcpu_pend_interrupt(dom0->exec_domain[0],vector);
++ domain_wake(dom0->exec_domain[0]);
++ }
++ else
++#endif
+ __do_IRQ(local_vector_to_irq(vector), regs);
+
+ /*
+@@ -276,3 +323,23 @@ ia64_send_ipi (int cpu, int vector, int
+
+ writeq(ipi_data, ipi_addr);
+ }
++
++/* From linux/kernel/softirq.c */
++#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
++# define invoke_softirq() __do_softirq()
++#else
++# define invoke_softirq() do_softirq()
++#endif
++
++/*
++ * Exit an interrupt context. Process softirqs if needed and possible:
++ */
++void irq_exit(void)
++{
++ account_system_vtime(current);
++ sub_preempt_count(IRQ_EXIT_OFFSET);
++ if (!in_interrupt() && local_softirq_pending())
++ invoke_softirq();
++ preempt_enable_no_resched();
++}
++/* end from linux/kernel/softirq.c */
--- /dev/null
+ ivt.S | 254 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 254 insertions(+)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/kernel/ivt.S
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/ivt.S 2005-04-07 10:29:00.565766924 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/kernel/ivt.S 2005-04-07 10:29:50.923594750 -0500
+@@ -1,3 +1,21 @@
++
++#ifdef XEN
++//#define CONFIG_DISABLE_VHPT // FIXME: change when VHPT is enabled??
++// these are all hacked out for now as the entire IVT
++// will eventually be replaced... just want to use it
++// for startup code to handle TLB misses
++//#define ia64_leave_kernel 0
++//#define ia64_ret_from_syscall 0
++//#define ia64_handle_irq 0
++//#define ia64_fault 0
++#define ia64_illegal_op_fault 0
++#define ia64_prepare_handle_unaligned 0
++#define ia64_bad_break 0
++#define ia64_trace_syscall 0
++#define sys_call_table 0
++#define sys_ni_syscall 0
++#include <asm/vhpt.h>
++#endif
+ /*
+ * arch/ia64/kernel/ivt.S
+ *
+@@ -77,6 +95,13 @@
+ mov r19=n;; /* prepare to save predicates */ \
+ br.sptk.many dispatch_to_fault_handler
+
++#ifdef XEN
++#define REFLECT(n) \
++ mov r31=pr; \
++ mov r19=n;; /* prepare to save predicates */ \
++ br.sptk.many dispatch_reflection
++#endif
++
+ .section .text.ivt,"ax"
+
+ .align 32768 // align on 32KB boundary
+@@ -214,6 +239,13 @@ END(vhpt_miss)
+ // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
+ ENTRY(itlb_miss)
+ DBG_FAULT(1)
++#ifdef XEN
++ VHPT_CCHAIN_LOOKUP(itlb_miss,i)
++#ifdef VHPT_GLOBAL
++ br.cond.sptk page_fault
++ ;;
++#endif
++#endif
+ /*
+ * The ITLB handler accesses the L3 PTE via the virtually mapped linear
+ * page table. If a nested TLB miss occurs, we switch into physical
+@@ -258,6 +290,13 @@ END(itlb_miss)
+ // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
+ ENTRY(dtlb_miss)
+ DBG_FAULT(2)
++#ifdef XEN
++ VHPT_CCHAIN_LOOKUP(dtlb_miss,d)
++#ifdef VHPT_GLOBAL
++ br.cond.sptk page_fault
++ ;;
++#endif
++#endif
+ /*
+ * The DTLB handler accesses the L3 PTE via the virtually mapped linear
+ * page table. If a nested TLB miss occurs, we switch into physical
+@@ -302,6 +341,13 @@ END(dtlb_miss)
+ // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
+ ENTRY(alt_itlb_miss)
+ DBG_FAULT(3)
++#ifdef XEN
++//#ifdef VHPT_GLOBAL
++// VHPT_CCHAIN_LOOKUP(alt_itlb_miss,i)
++// br.cond.sptk page_fault
++// ;;
++//#endif
++#endif
+ mov r16=cr.ifa // get address that caused the TLB miss
+ movl r17=PAGE_KERNEL
+ mov r21=cr.ipsr
+@@ -340,6 +386,13 @@ END(alt_itlb_miss)
+ // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
+ ENTRY(alt_dtlb_miss)
+ DBG_FAULT(4)
++#ifdef XEN
++//#ifdef VHPT_GLOBAL
++// VHPT_CCHAIN_LOOKUP(alt_dtlb_miss,d)
++// br.cond.sptk page_fault
++// ;;
++//#endif
++#endif
+ mov r16=cr.ifa // get address that caused the TLB miss
+ movl r17=PAGE_KERNEL
+ mov r20=cr.isr
+@@ -369,6 +422,17 @@ ENTRY(alt_dtlb_miss)
+ cmp.ne p8,p0=r0,r23
+ (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
+ (p8) br.cond.spnt page_fault
++#ifdef XEN
++ ;;
++ // FIXME: inadequate test, this is where we test for Xen address
++ // note that 0xf000 (cached) and 0xd000 (uncached) addresses
++ // should be OK. (Though no I/O is done in Xen, EFI needs uncached
++ // addresses and some domain EFI calls are passed through)
++ tbit.nz p0,p8=r16,60
++(p8) br.cond.spnt page_fault
++//(p8) br.cond.spnt 0
++ ;;
++#endif
+
+ dep r21=-1,r21,IA64_PSR_ED_BIT,1
+ or r19=r19,r17 // insert PTE control bits into r19
+@@ -449,6 +513,9 @@ END(nested_dtlb_miss)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
+ ENTRY(ikey_miss)
++#ifdef XEN
++ REFLECT(6)
++#endif
+ DBG_FAULT(6)
+ FAULT(6)
+ END(ikey_miss)
+@@ -461,9 +528,16 @@ ENTRY(page_fault)
+ srlz.i
+ ;;
+ SAVE_MIN_WITH_COVER
++#ifdef XEN
++ alloc r15=ar.pfs,0,0,4,0
++ mov out0=cr.ifa
++ mov out1=cr.isr
++ mov out3=cr.itir
++#else
+ alloc r15=ar.pfs,0,0,3,0
+ mov out0=cr.ifa
+ mov out1=cr.isr
++#endif
+ adds r3=8,r2 // set up second base pointer
+ ;;
+ ssm psr.ic | PSR_DEFAULT_BITS
+@@ -484,6 +558,9 @@ END(page_fault)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ ENTRY(dkey_miss)
++#ifdef XEN
++ REFLECT(7)
++#endif
+ DBG_FAULT(7)
+ FAULT(7)
+ END(dkey_miss)
+@@ -492,6 +569,9 @@ END(dkey_miss)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
+ ENTRY(dirty_bit)
++#ifdef XEN
++ REFLECT(8)
++#endif
+ DBG_FAULT(8)
+ /*
+ * What we do here is to simply turn on the dirty bit in the PTE. We need to
+@@ -554,6 +634,9 @@ END(dirty_bit)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
+ ENTRY(iaccess_bit)
++#ifdef XEN
++ REFLECT(9)
++#endif
+ DBG_FAULT(9)
+ // Like Entry 8, except for instruction access
+ mov r16=cr.ifa // get the address that caused the fault
+@@ -619,6 +702,9 @@ END(iaccess_bit)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
+ ENTRY(daccess_bit)
++#ifdef XEN
++ REFLECT(10)
++#endif
+ DBG_FAULT(10)
+ // Like Entry 8, except for data access
+ mov r16=cr.ifa // get the address that caused the fault
+@@ -687,6 +773,16 @@ ENTRY(break_fault)
+ * to prevent leaking bits from kernel to user level.
+ */
+ DBG_FAULT(11)
++#ifdef XEN
++ mov r16=cr.isr
++ mov r17=cr.iim
++ mov r31=pr
++ ;;
++ cmp.eq p7,p0=r0,r17 // is this a pseudo-cover?
++ // FIXME: may also need to check slot==2?
++(p7) br.sptk.many dispatch_privop_fault
++ br.sptk.many dispatch_break_fault
++#endif
+ mov r16=IA64_KR(CURRENT) // r16 = current task; 12 cycle read lat.
+ mov r17=cr.iim
+ mov r18=__IA64_BREAK_SYSCALL
+@@ -697,7 +793,9 @@ ENTRY(break_fault)
+ mov r27=ar.rsc
+ mov r26=ar.pfs
+ mov r28=cr.iip
++#ifndef XEN
+ mov r31=pr // prepare to save predicates
++#endif
+ mov r20=r1
+ ;;
+ adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
+@@ -797,6 +895,36 @@ END(interrupt)
+ DBG_FAULT(13)
+ FAULT(13)
+
++#ifdef XEN
++ // There is no particular reason for this code to be here, other than that
++ // there happens to be space here that would go unused otherwise. If this
++ // fault ever gets "unreserved", simply move the following code to a more
++ // suitable spot...
++
++ENTRY(dispatch_break_fault)
++ SAVE_MIN_WITH_COVER
++ ;;
++ alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
++ mov out0=cr.ifa
++ adds out1=16,sp
++ mov out2=cr.isr // FIXME: pity to make this slow access twice
++ mov out3=cr.iim // FIXME: pity to make this slow access twice
++
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ adds r3=8,r2 // set up second base pointer
++ ;;
++ SAVE_REST
++ movl r14=ia64_leave_kernel
++ ;;
++ mov rp=r14
++ br.sptk.many ia64_prepare_handle_break
++END(dispatch_break_fault)
++#endif
++
+ .org ia64_ivt+0x3800
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x3800 Entry 14 (size 64 bundles) Reserved
+@@ -850,9 +978,11 @@ END(interrupt)
+ * - ar.fpsr: set to kernel settings
+ */
+ GLOBAL_ENTRY(ia64_syscall_setup)
++#ifndef XEN
+ #if PT(B6) != 0
+ # error This code assumes that b6 is the first field in pt_regs.
+ #endif
++#endif
+ st8 [r1]=r19 // save b6
+ add r16=PT(CR_IPSR),r1 // initialize first base pointer
+ add r17=PT(R11),r1 // initialize second base pointer
+@@ -992,6 +1122,37 @@ END(dispatch_illegal_op_fault)
+ DBG_FAULT(16)
+ FAULT(16)
+
++#ifdef XEN
++ // There is no particular reason for this code to be here, other than that
++ // there happens to be space here that would go unused otherwise. If this
++ // fault ever gets "unreserved", simply move the following code to a more
++ // suitable spot...
++
++ENTRY(dispatch_privop_fault)
++ SAVE_MIN_WITH_COVER
++ ;;
++ alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
++ mov out0=cr.ifa
++ adds out1=16,sp
++ mov out2=cr.isr // FIXME: pity to make this slow access twice
++ mov out3=cr.itir
++
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ adds r3=8,r2 // set up second base pointer
++ ;;
++ SAVE_REST
++ movl r14=ia64_leave_kernel
++ ;;
++ mov rp=r14
++ br.sptk.many ia64_prepare_handle_privop
++END(dispatch_privop_fault)
++#endif
++
++
+ .org ia64_ivt+0x4400
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x4400 Entry 17 (size 64 bundles) Reserved
+@@ -1108,6 +1269,9 @@ END(dispatch_to_fault_handler)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
+ ENTRY(page_not_present)
++#ifdef XEN
++ REFLECT(20)
++#endif
+ DBG_FAULT(20)
+ mov r16=cr.ifa
+ rsm psr.dt
+@@ -1128,6 +1292,9 @@ END(page_not_present)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
+ ENTRY(key_permission)
++#ifdef XEN
++ REFLECT(21)
++#endif
+ DBG_FAULT(21)
+ mov r16=cr.ifa
+ rsm psr.dt
+@@ -1141,6 +1308,9 @@ END(key_permission)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
+ ENTRY(iaccess_rights)
++#ifdef XEN
++ REFLECT(22)
++#endif
+ DBG_FAULT(22)
+ mov r16=cr.ifa
+ rsm psr.dt
+@@ -1154,6 +1324,9 @@ END(iaccess_rights)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
+ ENTRY(daccess_rights)
++#ifdef XEN
++ REFLECT(23)
++#endif
+ DBG_FAULT(23)
+ mov r16=cr.ifa
+ rsm psr.dt
+@@ -1171,8 +1344,13 @@ ENTRY(general_exception)
+ mov r16=cr.isr
+ mov r31=pr
+ ;;
++#ifdef XEN
++ cmp4.ge p6,p0=0x20,r16
++(p6) br.sptk.many dispatch_privop_fault
++#else
+ cmp4.eq p6,p0=0,r16
+ (p6) br.sptk.many dispatch_illegal_op_fault
++#endif
+ ;;
+ mov r19=24 // fault number
+ br.sptk.many dispatch_to_fault_handler
+@@ -1182,6 +1360,9 @@ END(general_exception)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
+ ENTRY(disabled_fp_reg)
++#ifdef XEN
++ REFLECT(25)
++#endif
+ DBG_FAULT(25)
+ rsm psr.dfh // ensure we can access fph
+ ;;
+@@ -1195,6 +1376,9 @@ END(disabled_fp_reg)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
+ ENTRY(nat_consumption)
++#ifdef XEN
++ REFLECT(26)
++#endif
+ DBG_FAULT(26)
+ FAULT(26)
+ END(nat_consumption)
+@@ -1203,6 +1387,10 @@ END(nat_consumption)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
+ ENTRY(speculation_vector)
++#ifdef XEN
++ // this probably need not reflect...
++ REFLECT(27)
++#endif
+ DBG_FAULT(27)
+ /*
+ * A [f]chk.[as] instruction needs to take the branch to the recovery code but
+@@ -1246,6 +1434,9 @@ END(speculation_vector)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
+ ENTRY(debug_vector)
++#ifdef XEN
++ REFLECT(29)
++#endif
+ DBG_FAULT(29)
+ FAULT(29)
+ END(debug_vector)
+@@ -1254,6 +1445,9 @@ END(debug_vector)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
+ ENTRY(unaligned_access)
++#ifdef XEN
++ REFLECT(30)
++#endif
+ DBG_FAULT(30)
+ mov r16=cr.ipsr
+ mov r31=pr // prepare to save predicates
+@@ -1265,6 +1459,9 @@ END(unaligned_access)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
+ ENTRY(unsupported_data_reference)
++#ifdef XEN
++ REFLECT(31)
++#endif
+ DBG_FAULT(31)
+ FAULT(31)
+ END(unsupported_data_reference)
+@@ -1273,6 +1470,9 @@ END(unsupported_data_reference)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
+ ENTRY(floating_point_fault)
++#ifdef XEN
++ REFLECT(32)
++#endif
+ DBG_FAULT(32)
+ FAULT(32)
+ END(floating_point_fault)
+@@ -1281,6 +1481,9 @@ END(floating_point_fault)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
+ ENTRY(floating_point_trap)
++#ifdef XEN
++ REFLECT(33)
++#endif
+ DBG_FAULT(33)
+ FAULT(33)
+ END(floating_point_trap)
+@@ -1289,6 +1492,9 @@ END(floating_point_trap)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
+ ENTRY(lower_privilege_trap)
++#ifdef XEN
++ REFLECT(34)
++#endif
+ DBG_FAULT(34)
+ FAULT(34)
+ END(lower_privilege_trap)
+@@ -1297,6 +1503,9 @@ END(lower_privilege_trap)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
+ ENTRY(taken_branch_trap)
++#ifdef XEN
++ REFLECT(35)
++#endif
+ DBG_FAULT(35)
+ FAULT(35)
+ END(taken_branch_trap)
+@@ -1305,6 +1514,9 @@ END(taken_branch_trap)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
+ ENTRY(single_step_trap)
++#ifdef XEN
++ REFLECT(36)
++#endif
+ DBG_FAULT(36)
+ FAULT(36)
+ END(single_step_trap)
+@@ -1361,6 +1573,9 @@ END(single_step_trap)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x6900 Entry 45 (size 16 bundles) IA-32 Exeception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
+ ENTRY(ia32_exception)
++#ifdef XEN
++ REFLECT(45)
++#endif
+ DBG_FAULT(45)
+ FAULT(45)
+ END(ia32_exception)
+@@ -1369,6 +1584,9 @@ END(ia32_exception)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
+ ENTRY(ia32_intercept)
++#ifdef XEN
++ REFLECT(46)
++#endif
+ DBG_FAULT(46)
+ #ifdef CONFIG_IA32_SUPPORT
+ mov r31=pr
+@@ -1399,6 +1617,9 @@ END(ia32_intercept)
+ /////////////////////////////////////////////////////////////////////////////////////////
+ // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
+ ENTRY(ia32_interrupt)
++#ifdef XEN
++ REFLECT(47)
++#endif
+ DBG_FAULT(47)
+ #ifdef CONFIG_IA32_SUPPORT
+ mov r31=pr
+@@ -1528,6 +1749,39 @@ END(ia32_interrupt)
+ DBG_FAULT(67)
+ FAULT(67)
+
++#ifdef XEN
++ .org ia64_ivt+0x8000
++ENTRY(dispatch_reflection)
++ /*
++ * Input:
++ * psr.ic: off
++ * r19: intr type (offset into ivt, see ia64_int.h)
++ * r31: contains saved predicates (pr)
++ */
++ SAVE_MIN_WITH_COVER_R19
++ alloc r14=ar.pfs,0,0,5,0
++ mov out4=r15
++ mov out0=cr.ifa
++ adds out1=16,sp
++ mov out2=cr.isr
++ mov out3=cr.iim
++// mov out3=cr.itir
++
++ ssm psr.ic | PSR_DEFAULT_BITS
++ ;;
++ srlz.i // guarantee that interruption collection is on
++ ;;
++(p15) ssm psr.i // restore psr.i
++ adds r3=8,r2 // set up second base pointer
++ ;;
++ SAVE_REST
++ movl r14=ia64_leave_kernel
++ ;;
++ mov rp=r14
++ br.sptk.many ia64_prepare_handle_reflection
++END(dispatch_reflection)
++#endif
++
+ #ifdef CONFIG_IA32_SUPPORT
+
+ /*
--- /dev/null
+ time.c | 7 ++++++-
+ 1 files changed, 6 insertions(+), 1 deletion(-)
+
+Index: linux-2.6.11/kernel/time.c
+===================================================================
+--- linux-2.6.11.orig/kernel/time.c 2005-03-02 01:37:50.000000000 -0600
++++ linux-2.6.11/kernel/time.c 2005-03-19 14:56:40.767870674 -0600
+@@ -495,6 +495,7 @@ void getnstimeofday (struct timespec *tv
+ tv->tv_nsec = nsec;
+ }
+
++#ifndef XEN
+ int do_settimeofday (struct timespec *tv)
+ {
+ time_t wtm_sec, sec = tv->tv_sec;
+@@ -503,7 +504,9 @@ int do_settimeofday (struct timespec *tv
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ write_seqlock_irq(&xtime_lock);
++#endif
+ {
+ /*
+ * This is revolting. We need to set "xtime" correctly. However, the value
+@@ -525,7 +528,9 @@ int do_settimeofday (struct timespec *tv
+ time_esterror = NTP_PHASE_LIMIT;
+ time_interpolator_reset();
+ }
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ write_sequnlock_irq(&xtime_lock);
++#endif
+ clock_was_set();
+ return 0;
+ }
+@@ -552,7 +557,7 @@ void do_gettimeofday (struct timeval *tv
+ }
+
+ EXPORT_SYMBOL(do_gettimeofday);
+-
++#endif
+
+ #else
+ /*
--- /dev/null
+ kregs.h | 4 ++++
+ 1 files changed, 4 insertions(+)
+
+Index: linux-2.6.11/include/asm-ia64/kregs.h
+===================================================================
+--- linux-2.6.11.orig/include/asm-ia64/kregs.h 2005-03-02 01:37:49.000000000 -0600
++++ linux-2.6.11/include/asm-ia64/kregs.h 2005-03-19 13:44:24.362628092 -0600
+@@ -31,6 +31,10 @@
+ #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */
+ #define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */
+ #define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */
++#ifdef XEN
++#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */
++#define IA64_TR_VHPT 4 /* dtr4: vhpt */
++#endif
+
+ /* Processor status register bits: */
+ #define IA64_PSR_BE_BIT 1
--- /dev/null
+ vmlinux.lds.S | 2 ++
+ 1 files changed, 2 insertions(+)
+
+Index: linux-2.6.11/arch/ia64/kernel/vmlinux.lds.S
+===================================================================
+--- linux-2.6.11.orig/arch/ia64/kernel/vmlinux.lds.S 2005-03-02 01:38:25.000000000 -0600
++++ linux-2.6.11/arch/ia64/kernel/vmlinux.lds.S 2005-03-19 13:44:28.746368232 -0600
+@@ -11,12 +11,14 @@
+ OUTPUT_FORMAT("elf64-ia64-little")
+ OUTPUT_ARCH(ia64)
+ ENTRY(phys_start)
++#ifndef XEN
+ jiffies = jiffies_64;
+ PHDRS {
+ code PT_LOAD;
+ percpu PT_LOAD;
+ data PT_LOAD;
+ }
++#endif
+ SECTIONS
+ {
+ /* Sections to be discarded */
--- /dev/null
+ extable.c | 2 ++
+ 1 files changed, 2 insertions(+)
+
+Index: linux-2.6.11-xendiffs/kernel/extable.c
+===================================================================
+--- linux-2.6.11-xendiffs.orig/kernel/extable.c 2005-03-02 01:37:54.000000000 -0600
++++ linux-2.6.11-xendiffs/kernel/extable.c 2005-04-08 14:30:46.283360881 -0500
+@@ -20,6 +20,8 @@
+ #include <asm/uaccess.h>
+ #include <asm/sections.h>
+
++#define __module_text_address(addr) (NULL)
++
+ extern struct exception_table_entry __start___ex_table[];
+ extern struct exception_table_entry __stop___ex_table[];
+
--- /dev/null
+ hardirq.h | 6 ++++++
+ 1 files changed, 6 insertions(+)
+
+Index: linux-2.6.11-xendiffs/include/linux/hardirq.h
+===================================================================
+--- linux-2.6.11-xendiffs.orig/include/linux/hardirq.h 2005-03-02 01:38:00.000000000 -0600
++++ linux-2.6.11-xendiffs/include/linux/hardirq.h 2005-03-25 08:49:57.301998663 -0600
+@@ -2,7 +2,9 @@
+ #define LINUX_HARDIRQ_H
+
+ #include <linux/config.h>
++#ifndef XEN
+ #include <linux/smp_lock.h>
++#endif
+ #include <asm/hardirq.h>
+ #include <asm/system.h>
+
+@@ -60,7 +62,11 @@
+ */
+ #define in_irq() (hardirq_count())
+ #define in_softirq() (softirq_count())
++#ifndef XEN
+ #define in_interrupt() (irq_count())
++#else
++#define in_interrupt() 0 // FIXME LATER
++#endif
+
+ #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
+ # define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
--- /dev/null
+ time.h | 9 +++++++++
+ 1 files changed, 9 insertions(+)
+
+Index: linux-2.6.11/include/linux/time.h
+===================================================================
+--- linux-2.6.11.orig/include/linux/time.h 2005-03-02 01:38:12.000000000 -0600
++++ linux-2.6.11/include/linux/time.h 2005-03-19 13:46:27.987225234 -0600
+@@ -1,11 +1,18 @@
+ #ifndef _LINUX_TIME_H
+ #define _LINUX_TIME_H
+
++#ifdef XEN
++typedef s64 time_t;
++typedef s64 suseconds_t;
++#endif
++
+ #include <linux/types.h>
+
++#ifndef XEN
+ #ifdef __KERNEL__
+ #include <linux/seqlock.h>
+ #endif
++#endif
+
+ #ifndef _STRUCT_TIMESPEC
+ #define _STRUCT_TIMESPEC
+@@ -80,7 +87,9 @@ mktime (unsigned int year, unsigned int
+
+ extern struct timespec xtime;
+ extern struct timespec wall_to_monotonic;
++#ifndef XEN
+ extern seqlock_t xtime_lock;
++#endif
+
+ static inline unsigned long get_seconds(void)
+ {
--- /dev/null
+ mca_asm.h | 11 +++++++++++
+ 1 files changed, 11 insertions(+)
+
+Index: linux-2.6.11-xendiffs/include/asm-ia64/mca_asm.h
+===================================================================
+--- linux-2.6.11-xendiffs.orig/include/asm-ia64/mca_asm.h 2005-03-02 01:38:38.000000000 -0600
++++ linux-2.6.11-xendiffs/include/asm-ia64/mca_asm.h 2005-04-06 22:41:57.392411032 -0500
+@@ -26,8 +26,13 @@
+ * direct mapped to physical addresses.
+ * 1. Lop off bits 61 thru 63 in the virtual address
+ */
++#ifdef XEN
++#define INST_VA_TO_PA(addr) \
++ dep addr = 0, addr, 60, 4
++#else // XEN
+ #define INST_VA_TO_PA(addr) \
+ dep addr = 0, addr, 61, 3
++#endif // XEN
+ /*
+ * This macro converts a data virtual address to a physical address
+ * Right now for simulation purposes the virtual addresses are
+@@ -42,9 +47,15 @@
+ * direct mapped to physical addresses.
+ * 1. Put 0x7 in bits 61 thru 63.
+ */
++#ifdef XEN
++#define DATA_PA_TO_VA(addr,temp) \
++ mov temp = 0xf ;; \
++ dep addr = temp, addr, 60, 4
++#else // XEN
+ #define DATA_PA_TO_VA(addr,temp) \
+ mov temp = 0x7 ;; \
+ dep addr = temp, addr, 61, 3
++#endif // XEN
+
+ #define GET_THIS_PADDR(reg, var) \
+ mov reg = IA64_KR(PER_CPU_DATA);; \
--- /dev/null
+ minstate.h | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/kernel/minstate.h
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/minstate.h 2005-04-06 22:51:31.170261541 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/kernel/minstate.h 2005-04-06 22:54:03.210575034 -0500
+@@ -48,7 +48,7 @@
+ (pUStk) mov r24=ar.rnat; \
+ (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
+ (pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
+-(pUStk) dep r22=-1,r22,61,3; /* compute kernel virtual addr of RBS */ \
++(pUStk) dep r22=-1,r22,60,4; /* compute kernel virtual addr of RBS */ \
+ ;; \
+ (pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
+ (pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
+@@ -57,7 +57,7 @@
+ (pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
+
+ #define MINSTATE_END_SAVE_MIN_PHYS \
+- dep r12=-1,r12,61,3; /* make sp a kernel virtual address */ \
++ dep r12=-1,r12,60,4; /* make sp a kernel virtual address */ \
+ ;;
+
+ #ifdef MINSTATE_VIRT
--- /dev/null
+ contig.c | 172 +++++++++++++++++----------------------------------------------
+ 1 files changed, 48 insertions(+), 124 deletions(-)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/mm/contig.c
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/mm/contig.c 2005-04-07 11:02:50.227598140 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/mm/contig.c 2005-04-07 11:05:21.724931959 -0500
+@@ -15,11 +15,21 @@
+ * memory.
+ */
+ #include <linux/config.h>
++#ifdef XEN
++#include <xen/sched.h>
++#endif
+ #include <linux/bootmem.h>
+ #include <linux/efi.h>
+ #include <linux/mm.h>
+ #include <linux/swap.h>
+
++#ifdef XEN
++#undef reserve_bootmem
++extern struct page *zero_page_memmap_ptr;
++struct page *mem_map;
++#define MAX_DMA_ADDRESS ~0UL // FIXME???
++#endif
++
+ #include <asm/meminit.h>
+ #include <asm/pgalloc.h>
+ #include <asm/pgtable.h>
+@@ -38,30 +48,7 @@ static unsigned long num_dma_physpages;
+ void
+ show_mem (void)
+ {
+- int i, total = 0, reserved = 0;
+- int shared = 0, cached = 0;
+-
+- printk("Mem-info:\n");
+- show_free_areas();
+-
+- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+- i = max_mapnr;
+- while (i-- > 0) {
+- if (!pfn_valid(i))
+- continue;
+- total++;
+- if (PageReserved(mem_map+i))
+- reserved++;
+- else if (PageSwapCache(mem_map+i))
+- cached++;
+- else if (page_count(mem_map + i))
+- shared += page_count(mem_map + i) - 1;
+- }
+- printk("%d pages of RAM\n", total);
+- printk("%d reserved pages\n", reserved);
+- printk("%d pages shared\n", shared);
+- printk("%d pages swap cached\n", cached);
+- printk("%ld pages in page table cache\n", pgtable_cache_size);
++ printk("Dummy show_mem\n");
+ }
+
+ /* physical address where the bootmem map is located */
+@@ -81,6 +68,9 @@ find_max_pfn (unsigned long start, unsig
+ {
+ unsigned long *max_pfnp = arg, pfn;
+
++#ifdef XEN
++//printf("find_max_pfn: start=%lx, end=%lx, *arg=%lx\n",start,end,*(unsigned long *)arg);
++#endif
+ pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
+ if (pfn > *max_pfnp)
+ *max_pfnp = pfn;
+@@ -134,41 +124,6 @@ find_bootmap_location (unsigned long sta
+ return 0;
+ }
+
+-/**
+- * find_memory - setup memory map
+- *
+- * Walk the EFI memory map and find usable memory for the system, taking
+- * into account reserved areas.
+- */
+-void
+-find_memory (void)
+-{
+- unsigned long bootmap_size;
+-
+- reserve_memory();
+-
+- /* first find highest page frame number */
+- max_pfn = 0;
+- efi_memmap_walk(find_max_pfn, &max_pfn);
+-
+- /* how many bytes to cover all the pages */
+- bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
+-
+- /* look for a location to hold the bootmap */
+- bootmap_start = ~0UL;
+- efi_memmap_walk(find_bootmap_location, &bootmap_size);
+- if (bootmap_start == ~0UL)
+- panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
+-
+- bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
+-
+- /* Free all available memory, then mark bootmem-map as being in use. */
+- efi_memmap_walk(filter_rsvd_memory, free_bootmem);
+- reserve_bootmem(bootmap_start, bootmap_size);
+-
+- find_initrd();
+-}
+-
+ #ifdef CONFIG_SMP
+ /**
+ * per_cpu_init - setup per-cpu variables
+@@ -228,72 +183,41 @@ count_dma_pages (u64 start, u64 end, voi
+ void
+ paging_init (void)
+ {
+- unsigned long max_dma;
+- unsigned long zones_size[MAX_NR_ZONES];
+-#ifdef CONFIG_VIRTUAL_MEM_MAP
+- unsigned long zholes_size[MAX_NR_ZONES];
+- unsigned long max_gap;
+-#endif
+-
+- /* initialize mem_map[] */
+-
+- memset(zones_size, 0, sizeof(zones_size));
+-
+- num_physpages = 0;
+- efi_memmap_walk(count_pages, &num_physpages);
+-
+- max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+-
+-#ifdef CONFIG_VIRTUAL_MEM_MAP
+- memset(zholes_size, 0, sizeof(zholes_size));
++ struct pfn_info *pg;
++ /* Allocate and map the machine-to-phys table */
++ if ((pg = alloc_domheap_pages(NULL, 10)) == NULL)
++ panic("Not enough memory to bootstrap Xen.\n");
++ memset(page_to_virt(pg), 0x55, 16UL << 20);
+
+- num_dma_physpages = 0;
+- efi_memmap_walk(count_dma_pages, &num_dma_physpages);
++ /* Other mapping setup */
+
+- if (max_low_pfn < max_dma) {
+- zones_size[ZONE_DMA] = max_low_pfn;
+- zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
+- } else {
+- zones_size[ZONE_DMA] = max_dma;
+- zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
+- if (num_physpages > num_dma_physpages) {
+- zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+- zholes_size[ZONE_NORMAL] =
+- ((max_low_pfn - max_dma) -
+- (num_physpages - num_dma_physpages));
+- }
+- }
+-
+- max_gap = 0;
+- efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+- if (max_gap < LARGE_GAP) {
+- vmem_map = (struct page *) 0;
+- free_area_init_node(0, &contig_page_data, zones_size, 0,
+- zholes_size);
+- } else {
+- unsigned long map_size;
+-
+- /* allocate virtual_mem_map */
+-
+- map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+- vmalloc_end -= map_size;
+- vmem_map = (struct page *) vmalloc_end;
+- efi_memmap_walk(create_mem_map_page_table, NULL);
+-
+- mem_map = contig_page_data.node_mem_map = vmem_map;
+- free_area_init_node(0, &contig_page_data, zones_size,
+- 0, zholes_size);
+-
+- printk("Virtual mem_map starts at 0x%p\n", mem_map);
+- }
+-#else /* !CONFIG_VIRTUAL_MEM_MAP */
+- if (max_low_pfn < max_dma)
+- zones_size[ZONE_DMA] = max_low_pfn;
+- else {
+- zones_size[ZONE_DMA] = max_dma;
+- zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
+- }
+- free_area_init(zones_size);
+-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
+ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
+ }
++
++struct pfn_info *frame_table;
++unsigned long frame_table_size;
++unsigned long max_page;
++
++/* FIXME: postpone support to machines with big holes between physical memories.
++ * Current hack allows only efi memdesc upto 4G place. (See efi.c)
++ */
++#ifndef CONFIG_VIRTUAL_MEM_MAP
++#define FT_ALIGN_SIZE (16UL << 20)
++void __init init_frametable(void)
++{
++ unsigned long i, p;
++ frame_table_size = max_page * sizeof(struct pfn_info);
++ frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
++
++ /* Request continuous trunk from boot allocator, since HV
++ * address is identity mapped */
++ p = alloc_boot_pages(frame_table_size, FT_ALIGN_SIZE);
++ if (p == 0)
++ panic("Not enough memory for frame table.\n");
++
++ frame_table = __va(p);
++ memset(frame_table, 0, frame_table_size);
++ printk("size of frame_table: %lukB\n",
++ frame_table_size >> 10);
++}
++#endif
--- /dev/null
+ mmzone.h | 4 ++++
+ 1 files changed, 4 insertions(+)
+
+Index: linux-2.6.11/include/linux/mmzone.h
+===================================================================
+--- linux-2.6.11.orig/include/linux/mmzone.h 2005-03-02 01:38:10.000000000 -0600
++++ linux-2.6.11/include/linux/mmzone.h 2005-03-19 13:49:30.427573139 -0600
+@@ -209,7 +209,11 @@ struct zone {
+ * rarely used fields:
+ */
+ char *name;
++#ifdef XEN
++};
++#else
+ } ____cacheline_maxaligned_in_smp;
++#endif
+
+
+ /*
--- /dev/null
+ page.h | 42 +++++++++++++++++++++++++++++++++++++++---
+ 1 files changed, 39 insertions(+), 3 deletions(-)
+
+Index: linux-2.6.11-xendiffs/include/asm-ia64/page.h
+===================================================================
+--- linux-2.6.11-xendiffs.orig/include/asm-ia64/page.h 2005-04-06 22:58:07.597539393 -0500
++++ linux-2.6.11-xendiffs/include/asm-ia64/page.h 2005-04-06 23:06:15.908576975 -0500
+@@ -12,6 +12,9 @@
+ #include <asm/intrinsics.h>
+ #include <asm/types.h>
+
++#ifndef __ASSEMBLY__
++#include <asm/flushtlb.h>
++#endif
+ /*
+ * PAGE_SHIFT determines the actual kernel page size.
+ */
+@@ -95,9 +98,11 @@ extern int ia64_pfn_valid (unsigned long
+ #endif
+
+ #ifndef CONFIG_DISCONTIGMEM
++#ifdef XEN
++#define pfn_valid(pfn) (0)
++#else
+ # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
+-# define page_to_pfn(page) ((unsigned long) (page - mem_map))
+-# define pfn_to_page(pfn) (mem_map + (pfn))
++#endif
+ #else
+ extern struct page *vmem_map;
+ extern unsigned long max_low_pfn;
+@@ -106,9 +111,15 @@ extern unsigned long max_low_pfn;
+ # define pfn_to_page(pfn) (vmem_map + (pfn))
+ #endif
+
+-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
++#define page_to_pfn(_page) ((unsigned long)((_page) - frame_table))
++#define page_to_virt(_page) phys_to_virt(page_to_phys(_page))
++
++#define page_to_phys(_page) (page_to_pfn(_page) << PAGE_SHIFT)
+ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
++#define pfn_to_page(_pfn) (frame_table + (_pfn))
++#define phys_to_page(kaddr) pfn_to_page(((kaddr) >> PAGE_SHIFT))
++
+ typedef union ia64_va {
+ struct {
+ unsigned long off : 61; /* intra-region offset */
+@@ -124,8 +135,25 @@ typedef union ia64_va {
+ * expressed in this way to ensure they result in a single "dep"
+ * instruction.
+ */
++#ifdef XEN
++typedef union xen_va {
++ struct {
++ unsigned long off : 60;
++ unsigned long reg : 4;
++ } f;
++ unsigned long l;
++ void *p;
++} xen_va;
++
++// xen/drivers/console.c uses __va in a declaration (should be fixed!)
++#define __pa(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
++#define __va(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
++//# define __pa(x) ((unsigned long)(((unsigned long)x) - PAGE_OFFSET))
++//# define __va(x) ((void *)((char *)(x) + PAGE_OFFSET))
++#else
+ #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
+ #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
++#endif
+
+ #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
+ #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
+@@ -197,11 +225,19 @@ get_order (unsigned long size)
+ # define __pgprot(x) (x)
+ #endif /* !STRICT_MM_TYPECHECKS */
+
++#ifdef XEN
++#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000)
++#else
+ #define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000)
++#endif
+
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
+ (((current->personality & READ_IMPLIES_EXEC) != 0) \
+ ? VM_EXEC : 0))
+
++#ifdef XEN
++#define __flush_tlb() do {} while(0);
++#endif
++
+ #endif /* _ASM_IA64_PAGE_H */
--- /dev/null
+ pal.S | 8 ++++++++
+ 1 files changed, 8 insertions(+)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/kernel/pal.S
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/pal.S 2005-03-02 01:38:33.000000000 -0600
++++ linux-2.6.11-xendiffs/arch/ia64/kernel/pal.S 2005-04-06 22:43:53.817885390 -0500
+@@ -166,7 +166,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
+ adds r8 = 1f-1b,r8 // calculate return address for call
+ ;;
+ mov loc4=ar.rsc // save RSE configuration
++#ifdef XEN
++ dep.z loc2=loc2,0,60 // convert pal entry point to physical
++#else // XEN
+ dep.z loc2=loc2,0,61 // convert pal entry point to physical
++#endif // XEN
+ tpa r8=r8 // convert rp to physical
+ ;;
+ mov b7 = loc2 // install target to branch reg
+@@ -225,7 +229,11 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
+ mov loc3 = psr // save psr
+ ;;
+ mov loc4=ar.rsc // save RSE configuration
++#ifdef XEN
++ dep.z loc2=loc2,0,60 // convert pal entry point to physical
++#else // XEN
+ dep.z loc2=loc2,0,61 // convert pal entry point to physical
++#endif // XEN
+ ;;
+ mov ar.rsc=0 // put RSE in enforced lazy, LE mode
+ movl r16=PAL_PSR_BITS_TO_CLEAR
--- /dev/null
+ pgalloc.h | 17 +++++++++++------
+ 1 files changed, 11 insertions(+), 6 deletions(-)
+
+Index: linux-2.6.11-xendiffs/include/asm-ia64/pgalloc.h
+===================================================================
+--- linux-2.6.11-xendiffs.orig/include/asm-ia64/pgalloc.h 2005-04-08 11:57:30.909774800 -0500
++++ linux-2.6.11-xendiffs/include/asm-ia64/pgalloc.h 2005-04-08 11:58:08.102711219 -0500
+@@ -18,6 +18,7 @@
+ #include <linux/compiler.h>
+ #include <linux/mm.h>
+ #include <linux/page-flags.h>
++#include <linux/preempt.h>
+ #include <linux/threads.h>
+
+ #include <asm/mmu_context.h>
+@@ -34,6 +35,10 @@
+ #define pmd_quicklist (local_cpu_data->pmd_quick)
+ #define pgtable_cache_size (local_cpu_data->pgtable_cache_sz)
+
++/* FIXME: Later 3 level page table should be over, to create
++ * new interface upon xen memory allocator. To simplify first
++ * effort moving to xen allocator, use xenheap pages temporarily.
++ */
+ static inline pgd_t*
+ pgd_alloc_one_fast (struct mm_struct *mm)
+ {
+@@ -61,7 +66,7 @@ pgd_alloc (struct mm_struct *mm)
+ pgd_t *pgd = pgd_alloc_one_fast(mm);
+
+ if (unlikely(pgd == NULL)) {
+- pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
++ pgd = (pgd_t *)alloc_xenheap_page();
+ }
+ return pgd;
+ }
+@@ -104,7 +109,7 @@ pmd_alloc_one_fast (struct mm_struct *mm
+ static inline pmd_t*
+ pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
+ {
+- pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
++ pmd_t *pmd = (pmd_t *)alloc_xenheap_page();
+
+ return pmd;
+ }
+@@ -136,7 +141,7 @@ pmd_populate_kernel (struct mm_struct *m
+ static inline struct page *
+ pte_alloc_one (struct mm_struct *mm, unsigned long addr)
+ {
+- struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++ struct page *pte = alloc_xenheap_page();
+
+ return pte;
+ }
+@@ -144,7 +149,7 @@ pte_alloc_one (struct mm_struct *mm, uns
+ static inline pte_t *
+ pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
+ {
+- pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
++ pte_t *pte = (pte_t *)alloc_xenheap_page();
+
+ return pte;
+ }
+@@ -152,13 +157,13 @@ pte_alloc_one_kernel (struct mm_struct *
+ static inline void
+ pte_free (struct page *pte)
+ {
+- __free_page(pte);
++ free_xenheap_page(pte);
+ }
+
+ static inline void
+ pte_free_kernel (pte_t *pte)
+ {
+- free_page((unsigned long) pte);
++ free_xenheap_page((unsigned long) pte);
+ }
+
+ #define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte))
--- /dev/null
+ processor.h | 4 ++++
+ 1 files changed, 4 insertions(+)
+
+Index: linux-2.6.11/include/asm-ia64/processor.h
+===================================================================
+--- linux-2.6.11.orig/include/asm-ia64/processor.h 2005-03-02 01:37:58.000000000 -0600
++++ linux-2.6.11/include/asm-ia64/processor.h 2005-03-19 14:26:01.062135543 -0600
+@@ -408,12 +408,16 @@ extern void ia64_setreg_unknown_kr (void
+ */
+
+ /* Return TRUE if task T owns the fph partition of the CPU we're running on. */
++#ifdef XEN
++#define ia64_is_local_fpu_owner(t) 0
++#else
+ #define ia64_is_local_fpu_owner(t) \
+ ({ \
+ struct task_struct *__ia64_islfo_task = (t); \
+ (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
+ && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
+ })
++#endif
+
+ /* Mark task T as owning the fph partition of the CPU we're running on. */
+ #define ia64_set_local_fpu_owner(t) do { \
--- /dev/null
+ sal.h | 17 +++++++++++++++++
+ 1 files changed, 17 insertions(+)
+
+Index: linux-2.6.11-xendiffs/include/asm-ia64/sal.h
+===================================================================
+--- linux-2.6.11-xendiffs.orig/include/asm-ia64/sal.h 2005-04-08 12:00:53.510988510 -0500
++++ linux-2.6.11-xendiffs/include/asm-ia64/sal.h 2005-04-08 12:02:17.778587216 -0500
+@@ -36,6 +36,7 @@
+ #ifndef __ASSEMBLY__
+
+ #include <linux/bcd.h>
++#include <linux/preempt.h>
+ #include <linux/spinlock.h>
+ #include <linux/efi.h>
+
+@@ -650,7 +651,23 @@ ia64_sal_freq_base (unsigned long which,
+ {
+ struct ia64_sal_retval isrv;
+
++//#ifdef XEN
++#if 0
++ unsigned long *x = (unsigned long *)ia64_sal;
++ unsigned long *inst = (unsigned long *)*x;
++ unsigned long __ia64_sc_flags;
++ struct ia64_fpreg __ia64_sc_fr[6];
++printf("ia64_sal_freq_base: about to save_scratch_fpregs\n");
++ ia64_save_scratch_fpregs(__ia64_sc_fr);
++ spin_lock_irqsave(&sal_lock, __ia64_sc_flags);
++printf("ia64_sal_freq_base: about to call, ia64_sal=%p, ia64_sal[0]=%p, ia64_sal[1]=%p\n",x,x[0],x[1]);
++printf("first inst=%p,%p\n",inst[0],inst[1]);
++ isrv = (*ia64_sal)(SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
++ spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);
++ ia64_load_scratch_fpregs(__ia64_sc_fr);
++#else
+ SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
++#endif
+ *ticks_per_second = isrv.v0;
+ *drift_info = isrv.v1;
+ return isrv.status;
--- /dev/null
+bootmem.h
+current.h
+efi.c
+efi.h
+entry.S
+gcc_intrin.h
+hardirq.h
+head.S
+hpsim_irq.c
+hpsim_ssc.h
+hw_irq.h
+ide.h
+init_task.c
+init_task.h
+interrupt.h
+io.h
+irq.h
+irq_ia64.c
+ivt.S
+kregs.h
+lds.S
+linuxtime.h
+minstate.h
+mm_bootmem.c
+mm_contig.c
+mmzone.h
+page_alloc.c
+page.h
+processor.h
+sal.h
+setup.c
+slab.c
+slab.h
+system.h
+time.c
+kernel-time.c
+tlb.c
+types.h
+unaligned.c
+wait.h
--- /dev/null
+ setup.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++-------------
+ 1 files changed, 58 insertions(+), 14 deletions(-)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/kernel/setup.c
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/setup.c 2005-04-07 17:44:13.294980153 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/kernel/setup.c 2005-04-07 17:46:37.157717072 -0500
+@@ -21,6 +21,9 @@
+ #include <linux/init.h>
+
+ #include <linux/acpi.h>
++#ifdef XEN
++#include <xen/sched.h>
++#endif
+ #include <linux/bootmem.h>
+ #include <linux/console.h>
+ #include <linux/delay.h>
+@@ -30,13 +33,17 @@
+ #include <linux/seq_file.h>
+ #include <linux/string.h>
+ #include <linux/threads.h>
++#ifndef XEN
+ #include <linux/tty.h>
+ #include <linux/serial.h>
+ #include <linux/serial_core.h>
++#endif
+ #include <linux/efi.h>
+ #include <linux/initrd.h>
+
++#ifndef XEN
+ #include <asm/ia32.h>
++#endif
+ #include <asm/machvec.h>
+ #include <asm/mca.h>
+ #include <asm/meminit.h>
+@@ -51,6 +58,12 @@
+ #include <asm/smp.h>
+ #include <asm/system.h>
+ #include <asm/unistd.h>
++#ifdef XEN
++#include <linux/mm.h>
++#include <asm/mmu_context.h>
++extern unsigned long loops_per_jiffy; // from linux/init/main.c
++char saved_command_line[COMMAND_LINE_SIZE]; // from linux/init/main.c
++#endif
+
+ #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
+ # error "struct cpuinfo_ia64 too big!"
+@@ -66,7 +79,9 @@ DEFINE_PER_CPU(unsigned long, local_per_
+ DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
+ unsigned long ia64_cycles_per_usec;
+ struct ia64_boot_param *ia64_boot_param;
++#ifndef XEN
+ struct screen_info screen_info;
++#endif
+
+ unsigned long ia64_max_cacheline_size;
+ unsigned long ia64_iobase; /* virtual address for I/O accesses */
+@@ -95,7 +110,6 @@ EXPORT_SYMBOL(ia64_max_iommu_merge_mask)
+ struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
+ int num_rsvd_regions;
+
+-
+ /*
+ * Filter incoming memory segments based on the primitive map created from the boot
+ * parameters. Segments contained in the map are removed from the memory ranges. A
+@@ -125,9 +139,12 @@ filter_rsvd_memory (unsigned long start,
+ for (i = 0; i < num_rsvd_regions; ++i) {
+ range_start = max(start, prev_start);
+ range_end = min(end, rsvd_region[i].start);
+-
+- if (range_start < range_end)
+- call_pernode_memory(__pa(range_start), range_end - range_start, func);
++ /* init_boot_pages requires "ps, pe" */
++ if (range_start < range_end) {
++ printk("Init boot pages: 0x%lx -> 0x%lx.\n",
++ __pa(range_start), __pa(range_end));
++ (*func)(__pa(range_start), __pa(range_end), 0);
++ }
+
+ /* nothing more available in this segment */
+ if (range_end == end) return 0;
+@@ -184,17 +201,17 @@ reserve_memory (void)
+ + strlen(__va(ia64_boot_param->command_line)) + 1);
+ n++;
+
++ /* Reserve xen image/bitmap/xen-heap */
+ rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
+- rsvd_region[n].end = (unsigned long) ia64_imva(_end);
++ rsvd_region[n].end = rsvd_region[n].start + xenheap_size;
+ n++;
+
+-#ifdef CONFIG_BLK_DEV_INITRD
++ /* This is actually dom0 image */
+ if (ia64_boot_param->initrd_start) {
+ rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
+ rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;
+ n++;
+ }
+-#endif
+
+ /* end of memory marker */
+ rsvd_region[n].start = ~0UL;
+@@ -204,6 +221,16 @@ reserve_memory (void)
+ num_rsvd_regions = n;
+
+ sort_regions(rsvd_region, num_rsvd_regions);
++
++ {
++ int i;
++ printk("Reserved regions: \n");
++ for (i = 0; i < num_rsvd_regions; i++)
++ printk(" [%d] -> [0x%lx, 0x%lx]\n",
++ i,
++ rsvd_region[i].start,
++ rsvd_region[i].end);
++ }
+ }
+
+ /**
+@@ -298,18 +325,17 @@ mark_bsp_online (void)
+ #endif
+ }
+
++#ifdef XEN
+ void __init
+-setup_arch (char **cmdline_p)
++early_setup_arch (char **cmdline_p)
+ {
+ unw_init();
+
+- ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+-
+ *cmdline_p = __va(ia64_boot_param->command_line);
+ strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
++ cmdline_parse(*cmdline_p);
+
+ efi_init();
+- io_port_init();
+
+ #ifdef CONFIG_IA64_GENERIC
+ {
+@@ -339,6 +365,10 @@ setup_arch (char **cmdline_p)
+ if (early_console_setup(*cmdline_p) == 0)
+ mark_bsp_online();
+
++#ifdef XEN
++#undef CONFIG_ACPI_BOOT
++#endif
++
+ #ifdef CONFIG_ACPI_BOOT
+ /* Initialize the ACPI boot-time table parser */
+ acpi_table_init();
+@@ -350,9 +380,13 @@ setup_arch (char **cmdline_p)
+ smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
+ # endif
+ #endif /* CONFIG_APCI_BOOT */
++ io_port_init();
++}
++#endif
+
+- find_memory();
+-
++void __init
++setup_arch (void)
++{
+ /* process SAL system table: */
+ ia64_sal_init(efi.sal_systab);
+
+@@ -388,7 +422,6 @@ setup_arch (char **cmdline_p)
+ if (!strstr(saved_command_line, "nomca"))
+ ia64_mca_init();
+
+- platform_setup(cmdline_p);
+ paging_init();
+ }
+
+@@ -448,6 +481,9 @@ show_cpuinfo (struct seq_file *m, void *
+ sprintf(cp, " 0x%lx", mask);
+ }
+
++#ifdef XEN
++#define seq_printf(a,b...) printf(b)
++#endif
+ seq_printf(m,
+ "processor : %d\n"
+ "vendor : %s\n"
+@@ -659,11 +695,17 @@ cpu_init (void)
+ | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
++#ifdef XEN
++ if (current->domain->arch.mm)
++#else
+ if (current->mm)
++#endif
+ BUG();
+
+ ia64_mmu_init(ia64_imva(cpu_data));
++#ifndef XEN
+ ia64_mca_cpu_init(ia64_imva(cpu_data));
++#endif
+
+ #ifdef CONFIG_IA32_SUPPORT
+ ia32_cpu_init();
+@@ -711,6 +753,8 @@ cpu_init (void)
+ void
+ check_bugs (void)
+ {
++#ifndef XEN
+ ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
+ (unsigned long) __end___mckinley_e9_bundles);
++#endif
+ }
--- /dev/null
+ slab.h | 4 ++++
+ 1 files changed, 4 insertions(+)
+
+Index: linux-2.6.11/include/linux/slab.h
+===================================================================
+--- linux-2.6.11.orig/include/linux/slab.h 2005-03-02 01:38:33.000000000 -0600
++++ linux-2.6.11/include/linux/slab.h 2005-03-19 14:35:19.301871922 -0600
+@@ -91,7 +91,11 @@ static inline void *kmalloc(size_t size,
+ goto found; \
+ else \
+ i++;
++#ifdef XEN
++#include <linux/kmalloc_sizes.h>
++#else
+ #include "kmalloc_sizes.h"
++#endif
+ #undef CACHE
+ {
+ extern void __you_cannot_kmalloc_that_much(void);
--- /dev/null
+ swiotlb.c | 21 +++++++++++++--------
+ 1 files changed, 13 insertions(+), 8 deletions(-)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/lib/swiotlb.c
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/lib/swiotlb.c 2005-04-08 12:13:54.040202667 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/lib/swiotlb.c 2005-04-08 12:19:09.170367318 -0500
+@@ -124,8 +124,11 @@ swiotlb_init_with_default_size (size_t d
+ /*
+ * Get IO TLB memory from the low pages
+ */
+- io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
+- (1 << IO_TLB_SHIFT));
++ /* FIXME: Do we really need swiotlb in HV? If all memory trunks
++ * presented to guest as <4G, are actually <4G in machine range,
++ * no DMA intevention from HV...
++ */
++ io_tlb_start = alloc_xenheap_pages(get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)));
+ if (!io_tlb_start)
+ panic("Cannot allocate SWIOTLB buffer");
+ io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+@@ -135,16 +138,16 @@ swiotlb_init_with_default_size (size_t d
+ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+ * between io_tlb_start and io_tlb_end.
+ */
+- io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
++ io_tlb_list = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(int)));
+ for (i = 0; i < io_tlb_nslabs; i++)
+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_index = 0;
+- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
++ io_tlb_orig_addr = alloc_xenheap_pages(get_order(io_tlb_nslabs * sizeof(char *)));
+
+ /*
+ * Get the overflow emergency buffer
+ */
+- io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++ io_tlb_overflow_buffer = alloc_xenheap_pages(get_order(io_tlb_overflow));
+ printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
+ virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+ }
+@@ -328,13 +331,13 @@ swiotlb_alloc_coherent(struct device *hw
+ */
+ flags |= GFP_DMA;
+
+- ret = (void *)__get_free_pages(flags, order);
++ ret = (void *)alloc_xenheap_pages(get_order(size));
+ if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
+ /*
+ * The allocated memory isn't reachable by the device.
+ * Fall back on swiotlb_map_single().
+ */
+- free_pages((unsigned long) ret, order);
++ free_xenheap_pages((unsigned long) ret, order);
+ ret = NULL;
+ }
+ if (!ret) {
+@@ -372,7 +375,7 @@ swiotlb_free_coherent(struct device *hwd
+ {
+ if (!(vaddr >= (void *)io_tlb_start
+ && vaddr < (void *)io_tlb_end))
+- free_pages((unsigned long) vaddr, get_order(size));
++ free_xenheap_pages((unsigned long) vaddr, get_order(size));
+ else
+ /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
+ swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
+@@ -388,8 +391,10 @@ swiotlb_full(struct device *dev, size_t
+ * When the mapping is small enough return a static buffer to limit
+ * the damage, or panic when the transfer is too big.
+ */
++#ifndef XEN
+ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
+ "device %s\n", size, dev ? dev->bus_id : "?");
++#endif
+
+ if (size > io_tlb_overflow && do_panic) {
+ if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
--- /dev/null
+ system.h | 15 +++++++++++++++
+ 1 files changed, 15 insertions(+)
+
+Index: linux-2.6.11-xendiffs/include/asm-ia64/system.h
+===================================================================
+--- linux-2.6.11-xendiffs.orig/include/asm-ia64/system.h 2005-04-07 10:39:11.066701457 -0500
++++ linux-2.6.11-xendiffs/include/asm-ia64/system.h 2005-04-07 10:40:19.540544127 -0500
+@@ -24,8 +24,16 @@
+ * 0xa000000000000000+2*PERCPU_PAGE_SIZE
+ * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page)
+ */
++#ifdef XEN
++//#define KERNEL_START 0xf000000100000000
++#define KERNEL_START 0xf000000004000000
++#define PERCPU_ADDR 0xf100000000000000-PERCPU_PAGE_SIZE
++#define SHAREDINFO_ADDR 0xf100000000000000
++#define VHPT_ADDR 0xf200000000000000
++#else
+ #define KERNEL_START __IA64_UL_CONST(0xa000000100000000)
+ #define PERCPU_ADDR (-PERCPU_PAGE_SIZE)
++#endif
+
+ #ifndef __ASSEMBLY__
+
+@@ -218,9 +226,13 @@ extern void ia64_load_extra (struct task
+ # define PERFMON_IS_SYSWIDE() (0)
+ #endif
+
++#ifdef XEN
++#define IA64_HAS_EXTRA_STATE(t) 0
++#else
+ #define IA64_HAS_EXTRA_STATE(t) \
+ ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
+ || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
++#endif
+
+ #define __switch_to(prev,next,last) do { \
+ if (IA64_HAS_EXTRA_STATE(prev)) \
+@@ -249,6 +261,9 @@ extern void ia64_load_extra (struct task
+ #else
+ # define switch_to(prev,next,last) __switch_to(prev, next, last)
+ #endif
++//#ifdef XEN
++//#undef switch_to
++//#endif
+
+ /*
+ * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch,
--- /dev/null
+ time.c | 158 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 files changed, 158 insertions(+)
+
+Index: linux-2.6.11-xendiffs/arch/ia64/kernel/time.c
+===================================================================
+--- linux-2.6.11-xendiffs.orig/arch/ia64/kernel/time.c 2005-04-07 17:02:39.634985144 -0500
++++ linux-2.6.11-xendiffs/arch/ia64/kernel/time.c 2005-04-07 17:23:52.777723222 -0500
+@@ -10,16 +10,22 @@
+ */
+ #include <linux/config.h>
+
++#ifndef XEN
+ #include <linux/cpu.h>
++#endif
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
++#ifndef XEN
+ #include <linux/profile.h>
++#endif
+ #include <linux/sched.h>
+ #include <linux/time.h>
+ #include <linux/interrupt.h>
+ #include <linux/efi.h>
++#ifndef XEN
+ #include <linux/profile.h>
++#endif
+ #include <linux/timex.h>
+
+ #include <asm/machvec.h>
+@@ -29,10 +35,19 @@
+ #include <asm/sal.h>
+ #include <asm/sections.h>
+ #include <asm/system.h>
++#ifdef XEN
++#include <asm/ia64_int.h>
++#endif
+
+ extern unsigned long wall_jiffies;
+
++#ifndef XEN
+ u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
++#else
++#define INITIAL_JIFFIES 0
++u64 jiffies_64 = INITIAL_JIFFIES;
++#define CPU_PROFILING 0
++#endif
+
+ EXPORT_SYMBOL(jiffies_64);
+
+@@ -45,33 +60,154 @@ EXPORT_SYMBOL(last_cli_ip);
+
+ #endif
+
++#ifdef XEN
++volatile unsigned long last_nsec_offset;
++extern rwlock_t xtime_lock;
++unsigned long cpu_khz; /* Detected as we calibrate the TSC */
++static s_time_t stime_irq; /* System time at last 'time update' */
++
++static inline u64 get_time_delta(void)
++{
++ return ia64_get_itc();
++}
++
++s_time_t get_s_time(void)
++{
++ s_time_t now;
++ unsigned long flags;
++
++ read_lock_irqsave(&xtime_lock, flags);
++
++ now = stime_irq + get_time_delta();
++
++ /* Ensure that the returned system time is monotonically increasing. */
++ {
++ static s_time_t prev_now = 0;
++ if ( unlikely(now < prev_now) )
++ now = prev_now;
++ prev_now = now;
++ }
++
++ read_unlock_irqrestore(&xtime_lock, flags);
++
++ return now;
++}
++
++void update_dom_time(struct exec_domain *ed)
++{
++// FIXME: implement this?
++// printf("update_dom_time: called, not implemented, skipping\n");
++ return;
++}
++
++/* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */
++void do_settime(unsigned long secs, unsigned long usecs, u64 system_time_base)
++{
++// FIXME: Should this be do_settimeofday (from linux)???
++ printf("do_settime: called, not implemented, stopping\n");
++ dummy();
++}
++#endif
++
++#if 0 /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
++#endif /* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! */
++
++#ifndef XEN
+ static struct time_interpolator itc_interpolator = {
+ .shift = 16,
+ .mask = 0xffffffffffffffffLL,
+ .source = TIME_SOURCE_CPU
+ };
++#endif
++
++#ifdef XEN
++unsigned long domain0_ready = 0; // FIXME (see below)
++#define typecheck(a,b) 1
++/* FROM linux/include/linux/jiffies.h */
++/*
++ * These inlines deal with timer wrapping correctly. You are
++ * strongly encouraged to use them
++ * 1. Because people otherwise forget
++ * 2. Because if the timer wrap changes in future you won't have to
++ * alter your driver code.
++ *
++ * time_after(a,b) returns true if the time a is after time b.
++ *
++ * Do this with "<0" and ">=0" to only test the sign of the result. A
++ * good compiler would generate better code (and a really good compiler
++ * wouldn't care). Gcc is currently neither.
++ */
++#define time_after(a,b) \
++ (typecheck(unsigned long, a) && \
++ typecheck(unsigned long, b) && \
++ ((long)(b) - (long)(a) < 0))
++#define time_before(a,b) time_after(b,a)
++
++#define time_after_eq(a,b) \
++ (typecheck(unsigned long, a) && \
++ typecheck(unsigned long, b) && \
++ ((long)(a) - (long)(b) >= 0))
++#define time_before_eq(a,b) time_after_eq(b,a)
++#endif
+
+ static irqreturn_t
+ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
+ {
+ unsigned long new_itm;
+
++#ifndef XEN
+ if (unlikely(cpu_is_offline(smp_processor_id()))) {
+ return IRQ_HANDLED;
+ }
++#endif
++#ifdef XEN
++ if (current->domain == dom0) {
++ // FIXME: there's gotta be a better way of doing this...
++ // We have to ensure that domain0 is launched before we
++ // call vcpu_timer_expired on it
++ //domain0_ready = 1; // moved to xensetup.c
++ }
++ if (domain0_ready && vcpu_timer_expired(dom0->exec_domain[0])) {
++ vcpu_pend_timer(dom0->exec_domain[0]);
++ //vcpu_set_next_timer(dom0->exec_domain[0]);
++ domain_wake(dom0->exec_domain[0]);
++ }
++ if (!is_idle_task(current->domain) && current->domain != dom0) {
++ if (vcpu_timer_expired(current)) {
++ vcpu_pend_timer(current);
++ // ensure another timer interrupt happens even if domain doesn't
++ vcpu_set_next_timer(current);
++ domain_wake(current);
++ }
++ }
++ raise_actimer_softirq();
++#endif
+
++#ifndef XEN
+ platform_timer_interrupt(irq, dev_id, regs);
++#endif
+
+ new_itm = local_cpu_data->itm_next;
+
+ if (!time_after(ia64_get_itc(), new_itm))
++#ifdef XEN
++ return;
++#else
+ printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
+ ia64_get_itc(), new_itm);
++#endif
+
++#ifdef XEN
++// printf("GOT TO HERE!!!!!!!!!!!\n");
++ //while(1);
++#else
+ profile_tick(CPU_PROFILING, regs);
++#endif
+
+ while (1) {
++#ifndef XEN
+ update_process_times(user_mode(regs));
++#endif
+
+ new_itm += local_cpu_data->itm_delta;
+
+@@ -82,10 +218,16 @@ timer_interrupt (int irq, void *dev_id,
+ * another CPU. We need to avoid to SMP race by acquiring the
+ * xtime_lock.
+ */
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ write_seqlock(&xtime_lock);
++#endif
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ do_timer(regs);
++#endif
+ local_cpu_data->itm_next = new_itm;
++#ifdef TURN_ME_OFF_FOR_NOW_IA64_XEN
+ write_sequnlock(&xtime_lock);
++#endif
+ } else
+ local_cpu_data->itm_next = new_itm;
+
+@@ -105,7 +247,12 @@ timer_interrupt (int irq, void *dev_id,
+ */
+ while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
+ new_itm += local_cpu_data->itm_delta;
++//#ifdef XEN
++// vcpu_set_next_timer(current);
++//#else
++//printf("***** timer_interrupt: Setting itm to %lx\n",new_itm);
+ ia64_set_itm(new_itm);
++//#endif
+ /* double check, in case we got hit by a (slow) PMI: */
+ } while (time_after_eq(ia64_get_itc(), new_itm));
+ return IRQ_HANDLED;
+@@ -120,6 +267,7 @@ ia64_cpu_local_tick (void)
+ int cpu = smp_processor_id();
+ unsigned long shift = 0, delta;
+
++printf("ia64_cpu_local_tick: about to call ia64_set_itv\n");
+ /* arrange for the cycle counter to generate a timer interrupt: */
+ ia64_set_itv(IA64_TIMER_VECTOR);
+
+@@ -133,6 +281,7 @@ ia64_cpu_local_tick (void)
+ shift = (2*(cpu - hi) + 1) * delta/hi/2;
+ }
+ local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
++printf("***** ia64_cpu_local_tick: Setting itm to %lx\n",local_cpu_data->itm_next);
+ ia64_set_itm(local_cpu_data->itm_next);
+ }
+
+@@ -160,6 +309,7 @@ ia64_init_itm (void)
+ * frequency and then a PAL call to determine the frequency ratio between the ITC
+ * and the base frequency.
+ */
++
+ status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+ &platform_base_freq, &platform_base_drift);
+ if (status != 0) {
+@@ -212,6 +362,7 @@ ia64_init_itm (void)
+ + itc_freq/2)/itc_freq;
+
+ if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
++#ifndef XEN
+ itc_interpolator.frequency = local_cpu_data->itc_freq;
+ itc_interpolator.drift = itc_drift;
+ #ifdef CONFIG_SMP
+@@ -228,6 +379,7 @@ ia64_init_itm (void)
+ if (!nojitter) itc_interpolator.jitter = 1;
+ #endif
+ register_time_interpolator(&itc_interpolator);
++#endif
+ }
+
+ /* Setup the CPU local timer tick */
+@@ -236,7 +388,9 @@ ia64_init_itm (void)
+
+ static struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
++#ifndef XEN
+ .flags = SA_INTERRUPT,
++#endif
+ .name = "timer"
+ };
+
+@@ -244,12 +398,16 @@ void __init
+ time_init (void)
+ {
+ register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
++#ifndef XEN
+ efi_gettimeofday(&xtime);
++#endif
+ ia64_init_itm();
+
++#ifndef XEN
+ /*
+ * Initialize wall_to_monotonic such that adding it to xtime will yield zero, the
+ * tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
+ */
+ set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
++#endif
+ }
--- /dev/null
+ tlb.c | 10 ++++++++++
+ 1 files changed, 10 insertions(+)
+
+Index: linux-2.6.11/arch/ia64/mm/tlb.c
+===================================================================
+--- linux-2.6.11.orig/arch/ia64/mm/tlb.c 2005-03-02 01:38:38.000000000 -0600
++++ linux-2.6.11/arch/ia64/mm/tlb.c 2005-03-19 14:58:43.978400822 -0600
+@@ -21,7 +21,9 @@
+ #include <asm/mmu_context.h>
+ #include <asm/pgalloc.h>
+ #include <asm/pal.h>
++#ifndef XEN
+ #include <asm/tlbflush.h>
++#endif
+
+ static struct {
+ unsigned long mask; /* mask of supported purge page-sizes */
+@@ -43,6 +45,9 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
+ void
+ wrap_mmu_context (struct mm_struct *mm)
+ {
++#ifdef XEN
++printf("wrap_mmu_context: called, not implemented\n");
++#else
+ unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
+ struct task_struct *tsk;
+ int i;
+@@ -83,6 +88,7 @@ wrap_mmu_context (struct mm_struct *mm)
+ put_cpu();
+ }
+ local_flush_tlb_all();
++#endif
+ }
+
+ void
+@@ -132,6 +138,9 @@ EXPORT_SYMBOL(local_flush_tlb_all);
+ void
+ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
+ {
++#ifdef XEN
++printf("flush_tlb_range: called, not implemented\n");
++#else
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long size = end - start;
+ unsigned long nbits;
+@@ -163,6 +172,7 @@ flush_tlb_range (struct vm_area_struct *
+ # endif
+
+ ia64_srlz_i(); /* srlz.i implies srlz.d */
++#endif
+ }
+ EXPORT_SYMBOL(flush_tlb_range);
+
--- /dev/null
+ types.h | 7 +++++++
+ 1 files changed, 7 insertions(+)
+
+Index: linux-2.6.11/include/asm-ia64/types.h
+===================================================================
+--- linux-2.6.11.orig/include/asm-ia64/types.h 2005-03-02 01:37:49.000000000 -0600
++++ linux-2.6.11/include/asm-ia64/types.h 2005-03-19 14:58:47.628750770 -0600
+@@ -1,5 +1,12 @@
+ #ifndef _ASM_IA64_TYPES_H
+ #define _ASM_IA64_TYPES_H
++#ifdef XEN
++#ifndef __ASSEMBLY__
++typedef unsigned long ssize_t;
++typedef unsigned long size_t;
++typedef long long loff_t;
++#endif
++#endif
+
+ /*
+ * This file is never included by application software unless explicitly requested (e.g.,
--- /dev/null
+ unaligned.c | 27 +++++++++++++++++++++++++++
+ 1 files changed, 27 insertions(+)
+
+Index: linux-2.6.11/arch/ia64/kernel/unaligned.c
+===================================================================
+--- linux-2.6.11.orig/arch/ia64/kernel/unaligned.c 2005-03-02 01:38:25.000000000 -0600
++++ linux-2.6.11/arch/ia64/kernel/unaligned.c 2005-03-19 14:58:51.269335202 -0600
+@@ -15,8 +15,10 @@
+ */
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
++#ifndef XEN
+ #include <linux/smp_lock.h>
+ #include <linux/tty.h>
++#endif
+
+ #include <asm/intrinsics.h>
+ #include <asm/processor.h>
+@@ -24,7 +26,16 @@
+ #include <asm/uaccess.h>
+ #include <asm/unaligned.h>
+
++#ifdef XEN
++#define ia64_peek(x...) printk("ia64_peek: called, not implemented\n")
++#define ia64_poke(x...) printk("ia64_poke: called, not implemented\n")
++#define ia64_sync_fph(x...) printk("ia64_sync_fph: called, not implemented\n")
++#define ia64_flush_fph(x...) printk("ia64_flush_fph: called, not implemented\n")
++#define die_if_kernel(x...) printk("die_if_kernel: called, not implemented\n")
++#define jiffies 0
++#else
+ extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
++#endif
+
+ #undef DEBUG_UNALIGNED_TRAP
+
+@@ -437,7 +448,11 @@ get_rse_reg (struct pt_regs *regs, unsig
+ }
+
+
++#ifdef XEN
++void
++#else
+ static void
++#endif
+ setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
+ {
+ struct switch_stack *sw = (struct switch_stack *) regs - 1;
+@@ -611,7 +626,11 @@ getfpreg (unsigned long regnum, struct i
+ }
+
+
++#ifdef XEN
++void
++#else
+ static void
++#endif
+ getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
+ {
+ struct switch_stack *sw = (struct switch_stack *) regs - 1;
+@@ -1298,7 +1317,9 @@ ia64_handle_unaligned (unsigned long ifa
+ mm_segment_t old_fs = get_fs();
+ unsigned long bundle[2];
+ unsigned long opcode;
++#ifndef XEN
+ struct siginfo si;
++#endif
+ const struct exception_table_entry *eh = NULL;
+ union {
+ unsigned long l;
+@@ -1317,6 +1338,9 @@ ia64_handle_unaligned (unsigned long ifa
+ * user-level unaligned accesses. Otherwise, a clever program could trick this
+ * handler into reading an arbitrary kernel addresses...
+ */
++#ifdef XEN
++printk("ia64_handle_unaligned: called, not working yet\n");
++#else
+ if (!user_mode(regs))
+ eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
+ if (user_mode(regs) || eh) {
+@@ -1353,6 +1377,7 @@ ia64_handle_unaligned (unsigned long ifa
+
+ if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
+ goto failure;
++#endif
+
+ /*
+ * extract the instruction from the bundle given the slot number
+@@ -1493,6 +1518,7 @@ ia64_handle_unaligned (unsigned long ifa
+ /* NOT_REACHED */
+ }
+ force_sigbus:
++#ifndef XEN
+ si.si_signo = SIGBUS;
+ si.si_errno = 0;
+ si.si_code = BUS_ADRALN;
+@@ -1501,5 +1527,6 @@ ia64_handle_unaligned (unsigned long ifa
+ si.si_isr = 0;
+ si.si_imm = 0;
+ force_sig_info(SIGBUS, &si, current);
++#endif
+ goto done;
+ }
--- /dev/null
+ wait.h | 6 ++++++
+ 1 files changed, 6 insertions(+)
+
+Index: linux-2.6.11/include/linux/wait.h
+===================================================================
+--- linux-2.6.11.orig/include/linux/wait.h 2005-03-02 01:38:10.000000000 -0600
++++ linux-2.6.11/include/linux/wait.h 2005-03-19 15:00:23.691156973 -0600
+@@ -136,7 +136,11 @@ static inline void __remove_wait_queue(w
+ list_del(&old->task_list);
+ }
+
++#ifdef XEN
++void FASTCALL(__wake_up(struct task_struct *p));
++#else
+ void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
++#endif
+ extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
+ extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
+ void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
+@@ -147,6 +151,7 @@ int FASTCALL(out_of_line_wait_on_bit(voi
+ int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
+ wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
+
++#ifndef XEN
+ #define wake_up(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
+ #define wake_up_nr(x, nr) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
+ #define wake_up_all(x) __wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
+@@ -155,6 +160,7 @@ wait_queue_head_t *FASTCALL(bit_waitqueu
+ #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
+ #define wake_up_locked(x) __wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
+ #define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
++#endif
+
+ #define __wait_event(wq, condition) \
+ do { \
# run in xen-X.X/xen directory after unpacking linux in same directory
XEN=$PWD
-#LINUX=$XEN/linux-2.6.7
-LINUX=$XEN/../../linux-2.6.7
-LINUXPATCH=$XEN/arch/ia64/patch/linux-2.6.7
+LINUX=$XEN/../../linux-2.6.11
+LINUXPATCH=$XEN/arch/ia64/patch/linux-2.6.11
XENPATCH=$XEN/arch/ia64/patch/xen-2.0.1
cp_patch ()
#cp_patch mm/slab.c arch/ia64/slab.c slab.c
# following renamed to avoid conflict
-softlink kernel/extable.c arch/ia64/linuxextable.c
+cp_patch kernel/extable.c arch/ia64/linuxextable.c linuxextable.c
cp_patch arch/ia64/mm/contig.c arch/ia64/mm_contig.c mm_contig.c
cp_patch arch/ia64/mm/tlb.c arch/ia64/tlb.c tlb.c
# xen/include/asm-generic files
-softlink include/asm-generic/cpumask_const_value.h include/asm-generic/cpumask_const_value.h cpumask_const_value.h
-softlink include/asm-generic/cpumask.h include/asm-generic/cpumask.h cpumask.h
-softlink include/asm-generic/cpumask_up.h include/asm-generic/cpumask_up.h cpumask_up.h
-softlink include/asm-generic/cpumask_arith.h include/asm-generic/cpumask_arith.h cpumask_arith.h
+softlink include/asm-generic/bug.h include/asm-generic/bug.h bug.h
softlink include/asm-generic/div64.h include/asm-generic/div64.h div64.h
+softlink include/asm-generic/errno.h include/asm-generic/errno.h
+softlink include/asm-generic/errno-base.h include/asm-generic/errno-base.h
softlink include/asm-generic/ide_iops.h include/asm-generic/ide_iops.h ide_iops.h
+softlink include/asm-generic/iomap.h include/asm-generic/iomap.h iomap.h
softlink include/asm-generic/pci-dma-compat.h include/asm-generic/pci-dma-compat.h pci-dma-compat.h
softlink include/asm-generic/pci.h include/asm-generic/pci.h pci.h
softlink include/asm-generic/pgtable.h include/asm-generic/pgtable.h pgtable.h
+softlink include/asm-generic/pgtable-nopud.h include/asm-generic/pgtable-nopud.h pgtable-nopud.h
softlink include/asm-generic/sections.h include/asm-generic/sections.h sections.h
softlink include/asm-generic/topology.h include/asm-generic/topology.h topology.h
softlink include/asm-generic/vmlinux.lds.h include/asm-generic/vmlinux.lds.h vmlinux.lds.h
cp_patch arch/ia64/hp/sim/hpsim_ssc.h include/asm-ia64/hpsim_ssc.h hpsim_ssc.h
cp_patch include/asm-ia64/current.h include/asm-ia64/current.h current.h
-cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h
+#cp_patch include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h gcc_intrin.h
+softlink include/asm-ia64/gcc_intrin.h include/asm-ia64/gcc_intrin.h
cp_patch include/asm-ia64/hardirq.h include/asm-ia64/hardirq.h hardirq.h
-cp_patch include/asm-ia64/hw_irq.h include/asm-ia64/hw_irq.h hw_irq.h
-cp_patch include/asm-ia64/ide.h include/asm-ia64/ide.h ide.h
+#cp_patch include/asm-ia64/hw_irq.h include/asm-ia64/hw_irq.h hw_irq.h
+softlink include/asm-ia64/hw_irq.h include/asm-ia64/hw_irq.h
+#cp_patch include/asm-ia64/ide.h include/asm-ia64/ide.h ide.h
cp_patch include/asm-ia64/io.h include/asm-ia64/io.h io.h
cp_patch include/asm-ia64/irq.h include/asm-ia64/irq.h irq.h
cp_patch include/asm-ia64/kregs.h include/asm-ia64/kregs.h kregs.h
softlink include/asm-ia64/cacheflush.h include/asm-ia64/cacheflush.h
softlink include/asm-ia64/cache.h include/asm-ia64/cache.h
softlink include/asm-ia64/checksum.h include/asm-ia64/checksum.h
-softlink include/asm-ia64/cpumask.h include/asm-ia64/cpumask.h
softlink include/asm-ia64/delay.h include/asm-ia64/delay.h
softlink include/asm-ia64/div64.h include/asm-ia64/div64.h
softlink include/asm-ia64/dma.h include/asm-ia64/dma.h
softlink include/asm-ia64/dma-mapping.h include/asm-ia64/dma-mapping.h
+softlink include/asm-ia64/errno.h include/asm-ia64/errno.h
softlink include/asm-ia64/fpu.h include/asm-ia64/fpu.h
softlink include/asm-ia64/hdreg.h include/asm-ia64/hdreg.h
softlink include/asm-ia64/ia32.h include/asm-ia64/ia32.h
softlink include/asm-ia64/scatterlist.h include/asm-ia64/scatterlist.h
softlink include/asm-ia64/sections.h include/asm-ia64/sections.h
softlink include/asm-ia64/semaphore.h include/asm-ia64/semaphore.h
+softlink include/asm-ia64/setup.h include/asm-ia64/setup.h
softlink include/asm-ia64/sigcontext.h include/asm-ia64/sigcontext.h
softlink include/asm-ia64/signal.h include/asm-ia64/signal.h
softlink include/asm-ia64/smp.h include/asm-ia64/smp.h
# xen/include/asm-ia64/linux/*.h (== linux/include/linux/*.h)
cp_patch include/linux/bootmem.h include/asm-ia64/linux/bootmem.h bootmem.h
-cp_patch include/linux/efi.h include/asm-ia64/linux/efi.h efi.h
+cp_patch include/linux/cpumask.h include/asm-ia64/linux/cpumask.h cpumask.h
+#cp_patch include/linux/dma-mapping.h include/asm-ia64/linux/dma-mapping.h dma-mapping.h
+softlink include/linux/dma-mapping.h include/asm-ia64/linux/dma-mapping.h
+#cp_patch include/linux/efi.h include/asm-ia64/linux/efi.h efi.h
+softlink include/linux/efi.h include/asm-ia64/linux/efi.h
+cp_patch include/linux/hardirq.h include/asm-ia64/linux/hardirq.h linuxhardirq.h
#cp_patch include/linux/init_task.h include/asm-ia64/linux/init_task.h init_task.h
cp_patch include/linux/interrupt.h include/asm-ia64/linux/interrupt.h interrupt.h
cp_patch include/linux/mmzone.h include/asm-ia64/linux/mmzone.h mmzone.h
softlink include/linux/bcd.h include/asm-ia64/linux/bcd.h
softlink include/linux/bitmap.h include/asm-ia64/linux/bitmap.h
softlink include/linux/bitops.h include/asm-ia64/linux/bitops.h
-softlink include/linux/cpumask.h include/asm-ia64/linux/cpumask.h
-softlink include/linux/dma-mapping.h include/asm-ia64/linux/dma-mapping.h
+softlink include/linux/err.h include/asm-ia64/linux/err.h
softlink include/linux/gfp.h include/asm-ia64/linux/gfp.h
softlink include/linux/initrd.h include/asm-ia64/linux/initrd.h
softlink include/linux/kmalloc_sizes.h include/asm-ia64/linux/kmalloc_sizes.h
null include/asm-ia64/linux/file.h
null include/asm-ia64/linux/module.h
null include/asm-ia64/linux/swap.h
+null include/asm-ia64/linux/device.h
+null include/asm-ia64/linux/proc_fs.h
+null include/asm-ia64/linux/rtc.h
+null include/asm-ia64/linux/profile.h
softlink include/linux/byteorder/generic.h include/asm-ia64/linux/byteorder/generic.h
softlink include/linux/byteorder/little_endian.h include/asm-ia64/linux/byteorder/little_endian.h
(p7) br.cond.sptk .stack_overlaps
;;
movl r25=PAGE_KERNEL
- dep r20=0,r13,60,4 // physical address of "current"
+ dep r21=0,r13,60,4 // physical address of "current"
;;
- or r23=r25,r20 // construct PA | page properties
+ or r23=r25,r21 // construct PA | page properties
mov r25=IA64_GRANULE_SHIFT<<2
;;
ptr.d r13,r25
;;
movl r25=PAGE_KERNEL
;;
- mov r20=loc2 // saved percpu physical address
+ mov r21=loc2 // saved percpu physical address
;;
- or r23=r25,r20 // construct PA | page properties
+ or r23=r25,r21 // construct PA | page properties
mov r24=PERCPU_PAGE_SHIFT<<2
;;
ptr.d r22,r24
;;
movl r25=PAGE_KERNEL
;;
- mov r20=loc6 // saved vhpt physical address
+ mov r21=loc6 // saved vhpt physical address
;;
- or r23=r25,r20 // construct PA | page properties
+ or r23=r25,r21 // construct PA | page properties
mov r24=VHPT_PAGE_SHIFT<<2
;;
ptr.d r22,r24
;;
movl r25=PAGE_KERNEL
;;
- mov r20=loc5 // saved sharedinfo physical address
+ mov r21=loc5 // saved sharedinfo physical address
;;
- or r23=r25,r20 // construct PA | page properties
+ or r23=r25,r21 // construct PA | page properties
mov r24=PAGE_SHIFT<<2
;;
ptr.d r22,r24
DEFINE_PER_CPU(struct page_state, page_states) = {0};
unsigned long totalram_pages;
+void __mod_page_state(unsigned offset, unsigned long delta)
+{
+ unsigned long flags;
+ void* ptr;
+
+ local_irq_save(flags);
+ ptr = &__get_cpu_var(page_states);
+ *(unsigned long*)(ptr + offset) += delta;
+ local_irq_restore(flags);
+}
+
///////////////////////////////
// from arch/x86/flushtlb.c
///////////////////////////////
#define CLEAR_BITMAP(name,bits) \
memset(name, 0, BITS_TO_LONGS(bits)*sizeof(unsigned long))
-// from linux/include/linux/compiler.h
-#define __user
-
// FIXME?: x86-ism used in xen/mm.h
#define LOCK_PREFIX
// linux/include/linux/compiler.h
#define __attribute_const__
+#define __user
+//#define __kernel
+//#define __safe
+#define __force
+#define __iomem
+#define __chk_user_ptr(x) (void)0
+//#define __chk_io_ptr(x) (void)0
+//#define __builtin_warning(x, y...) (1)
+//#define __acquires(x)
+//#define __releases(x)
+//#define __acquire(x) (void)0
+//#define __release(x) (void)0
+//#define __cond_lock(x) (x)
+#define __must_check
+#define __deprecated
// xen/include/asm/config.h
#define HZ 100
//#define CONFIG_NR_CPUS 16
#define barrier() __asm__ __volatile__("": : :"memory")
+// linux/include/spinlock.h
+#define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED
+
///////////////////////////////////////////////////////////////
// xen/include/asm/config.h
// Natural boundary upon TR size to define xenheap space
typedef unsigned long page_flags_t;
-// from linux/include/linux/mm.h
-
-extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
-
-/*
- * On a two-level page table, this ends up being trivial. Thus the
- * inlining and the symmetry break with pte_alloc_map() that does all
- * of this out-of-line.
- */
-static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
-{
- if (pgd_none(*pgd))
- return __pmd_alloc(mm, pgd, address);
- return pmd_offset(pgd, address);
-}
-
-
/*
* Per-page-frame information.
*/